* [PATCH v7 01/28] ring-buffer: Add page statistics to the meta-page
From: Vincent Donnefort @ 2025-10-03 13:37 UTC
To: rostedt, mhiramat, mathieu.desnoyers, linux-trace-kernel, maz,
oliver.upton, joey.gouly, suzuki.poulose, yuzenghui
Cc: kvmarm, linux-arm-kernel, jstultz, qperret, will, aneesh.kumar,
kernel-team, linux-kernel, Vincent Donnefort
Add two fields, pages_touched and pages_lost, to the ring-buffer
meta-page. These fields allow a reader to compute the number of pages
currently in use in the ring-buffer.
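For instance, a reader mapping the meta-page could estimate how many
pages currently hold data along these lines (a minimal user-space
sketch; meta_pages_in_use() is a hypothetical helper, only the two new
fields come from this patch):

    static __u64 meta_pages_in_use(const struct trace_buffer_meta *meta)
    {
        /* Pages the writer moved to, minus those it has overwritten */
        return meta->pages_touched - meta->pages_lost;
    }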
Signed-off-by: Vincent Donnefort <vdonnefort@google.com>
diff --git a/include/uapi/linux/trace_mmap.h b/include/uapi/linux/trace_mmap.h
index c102ef35d11e..e8185889a1c8 100644
--- a/include/uapi/linux/trace_mmap.h
+++ b/include/uapi/linux/trace_mmap.h
@@ -17,8 +17,8 @@
* @entries: Number of entries in the ring-buffer.
* @overrun: Number of entries lost in the ring-buffer.
* @read: Number of entries that have been read.
- * @Reserved1: Internal use only.
- * @Reserved2: Internal use only.
+ * @pages_lost: Number of pages overwritten by the writer.
+ * @pages_touched: Number of pages written by the writer.
*/
struct trace_buffer_meta {
__u32 meta_page_size;
@@ -39,8 +39,8 @@ struct trace_buffer_meta {
__u64 overrun;
__u64 read;
- __u64 Reserved1;
- __u64 Reserved2;
+ __u64 pages_lost;
+ __u64 pages_touched;
};
#define TRACE_MMAP_IOCTL_GET_READER _IO('R', 0x20)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 43460949ad3f..2c157cb86989 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -6113,6 +6113,8 @@ static void rb_update_meta_page(struct ring_buffer_per_cpu *cpu_buffer)
meta->entries = local_read(&cpu_buffer->entries);
meta->overrun = local_read(&cpu_buffer->overrun);
meta->read = cpu_buffer->read;
+ meta->pages_lost = local_read(&cpu_buffer->pages_lost);
+ meta->pages_touched = local_read(&cpu_buffer->pages_touched);
/* Some archs do not have data cache coherency between kernel and user-space */
flush_kernel_vmap_range(cpu_buffer->meta_page, PAGE_SIZE);
--
2.51.0.618.g983fd99d29-goog
* [PATCH v7 02/28] ring-buffer: Store bpage pointers into subbuf_ids
From: Vincent Donnefort @ 2025-10-03 13:37 UTC
To: rostedt, mhiramat, mathieu.desnoyers, linux-trace-kernel, maz,
oliver.upton, joey.gouly, suzuki.poulose, yuzenghui
Cc: kvmarm, linux-arm-kernel, jstultz, qperret, will, aneesh.kumar,
kernel-team, linux-kernel, Vincent Donnefort
The subbuf_ids array maps an ID to a specific page of the ring-buffer.
In preparation for the upcoming ring-buffer remote support, make this
array point to the buffer_page instead of the buffer_data_page.
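Resolving the kernel VA of a sub-buffer then takes one extra
dereference, as in the __rb_map_vma() hunk below:

    struct buffer_page *subbuf = cpu_buffer->subbuf_ids[id];
    void *va = subbuf->page; /* was: (void *)cpu_buffer->subbuf_ids[id] */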
Signed-off-by: Vincent Donnefort <vdonnefort@google.com>
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 2c157cb86989..e5846f8f3c07 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -519,7 +519,7 @@ struct ring_buffer_per_cpu {
unsigned int mapped;
unsigned int user_mapped; /* user space mapping */
struct mutex mapping_lock;
- unsigned long *subbuf_ids; /* ID to subbuf VA */
+ struct buffer_page **subbuf_ids; /* ID to subbuf VA */
struct trace_buffer_meta *meta_page;
struct ring_buffer_cpu_meta *ring_meta;
@@ -7002,7 +7002,7 @@ static void rb_free_meta_page(struct ring_buffer_per_cpu *cpu_buffer)
}
static void rb_setup_ids_meta_page(struct ring_buffer_per_cpu *cpu_buffer,
- unsigned long *subbuf_ids)
+ struct buffer_page **subbuf_ids)
{
struct trace_buffer_meta *meta = cpu_buffer->meta_page;
unsigned int nr_subbufs = cpu_buffer->nr_pages + 1;
@@ -7011,7 +7011,7 @@ static void rb_setup_ids_meta_page(struct ring_buffer_per_cpu *cpu_buffer,
int id = 0;
id = rb_page_id(cpu_buffer, cpu_buffer->reader_page, id);
- subbuf_ids[id++] = (unsigned long)cpu_buffer->reader_page->page;
+ subbuf_ids[id++] = cpu_buffer->reader_page;
cnt++;
first_subbuf = subbuf = rb_set_head_page(cpu_buffer);
@@ -7021,7 +7021,7 @@ static void rb_setup_ids_meta_page(struct ring_buffer_per_cpu *cpu_buffer,
if (WARN_ON(id >= nr_subbufs))
break;
- subbuf_ids[id] = (unsigned long)subbuf->page;
+ subbuf_ids[id] = subbuf;
rb_inc_page(&subbuf);
id++;
@@ -7030,7 +7030,7 @@ static void rb_setup_ids_meta_page(struct ring_buffer_per_cpu *cpu_buffer,
WARN_ON(cnt != nr_subbufs);
- /* install subbuf ID to kern VA translation */
+ /* install subbuf ID to bpage translation */
cpu_buffer->subbuf_ids = subbuf_ids;
meta->meta_struct_len = sizeof(*meta);
@@ -7186,13 +7186,15 @@ static int __rb_map_vma(struct ring_buffer_per_cpu *cpu_buffer,
}
while (p < nr_pages) {
+ struct buffer_page *subbuf;
struct page *page;
int off = 0;
if (WARN_ON_ONCE(s >= nr_subbufs))
return -EINVAL;
- page = virt_to_page((void *)cpu_buffer->subbuf_ids[s]);
+ subbuf = cpu_buffer->subbuf_ids[s];
+ page = virt_to_page((void *)subbuf->page);
for (; off < (1 << (subbuf_order)); off++, page++) {
if (p >= nr_pages)
@@ -7219,7 +7221,8 @@ int ring_buffer_map(struct trace_buffer *buffer, int cpu,
struct vm_area_struct *vma)
{
struct ring_buffer_per_cpu *cpu_buffer;
- unsigned long flags, *subbuf_ids;
+ struct buffer_page **subbuf_ids;
+ unsigned long flags;
int err;
if (!cpumask_test_cpu(cpu, buffer->cpumask))
@@ -7243,7 +7246,7 @@ int ring_buffer_map(struct trace_buffer *buffer, int cpu,
if (err)
return err;
- /* subbuf_ids include the reader while nr_pages does not */
+ /* subbuf_ids includes the reader while nr_pages does not */
subbuf_ids = kcalloc(cpu_buffer->nr_pages + 1, sizeof(*subbuf_ids), GFP_KERNEL);
if (!subbuf_ids) {
rb_free_meta_page(cpu_buffer);
--
2.51.0.618.g983fd99d29-goog
* [PATCH v7 03/28] ring-buffer: Introduce ring-buffer remotes
From: Vincent Donnefort @ 2025-10-03 13:38 UTC
To: rostedt, mhiramat, mathieu.desnoyers, linux-trace-kernel, maz,
oliver.upton, joey.gouly, suzuki.poulose, yuzenghui
Cc: kvmarm, linux-arm-kernel, jstultz, qperret, will, aneesh.kumar,
kernel-team, linux-kernel, Vincent Donnefort
A ring-buffer remote is an entity outside of the kernel (most likely
firmware or a hypervisor) capable of writing events into a ring-buffer
that follows the same format as the tracefs ring-buffer.
To set up the ring-buffer on the kernel side, a description of the pages
forming the ring-buffer (struct trace_buffer_desc) must be given.
Callbacks (swap_reader_page and reset) must also be provided.
The remote is expected to keep the meta-page updated.
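As an illustration, a backend could register its buffer as follows (a
sketch only: my_desc, my_swap_reader_page() and my_reset() are
hypothetical, while struct ring_buffer_remote, ring_buffer_alloc_remote()
and ring_buffer_poll_remote() are introduced by this patch):

    static struct ring_buffer_remote my_remote = {
        .desc             = &my_desc, /* page and meta-page VAs */
        .swap_reader_page = my_swap_reader_page,
        .reset            = my_reset,
    };

    struct trace_buffer *buffer = ring_buffer_alloc_remote(&my_remote);

    /* Periodically sync with the remote meta-pages and wake readers */
    ring_buffer_poll_remote(buffer, RING_BUFFER_ALL_CPUS);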
Signed-off-by: Vincent Donnefort <vdonnefort@google.com>
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 876358cfe1b1..41193c5b0d28 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -250,4 +250,62 @@ int ring_buffer_map(struct trace_buffer *buffer, int cpu,
struct vm_area_struct *vma);
int ring_buffer_unmap(struct trace_buffer *buffer, int cpu);
int ring_buffer_map_get_reader(struct trace_buffer *buffer, int cpu);
+
+struct ring_buffer_desc {
+ int cpu;
+ unsigned int nr_page_va; /* excludes the meta page */
+ unsigned long meta_va;
+ unsigned long page_va[] __counted_by(nr_page_va);
+};
+
+struct trace_buffer_desc {
+ int nr_cpus;
+ size_t struct_len;
+ char __data[]; /* list of ring_buffer_desc */
+};
+
+static inline struct ring_buffer_desc *__next_ring_buffer_desc(struct ring_buffer_desc *desc)
+{
+ size_t len = struct_size(desc, page_va, desc->nr_page_va);
+
+ return (struct ring_buffer_desc *)((void *)desc + len);
+}
+
+static inline struct ring_buffer_desc *__first_ring_buffer_desc(struct trace_buffer_desc *desc)
+{
+ return (struct ring_buffer_desc *)(&desc->__data[0]);
+}
+
+static inline size_t trace_buffer_desc_size(size_t buffer_size, unsigned int nr_cpus)
+{
+ unsigned int nr_pages = max(DIV_ROUND_UP(buffer_size, PAGE_SIZE), 2UL) + 1;
+ struct ring_buffer_desc *rbdesc;
+
+ return size_add(offsetof(struct trace_buffer_desc, __data),
+ size_mul(nr_cpus, struct_size(rbdesc, page_va, nr_pages)));
+}
+
+#define for_each_ring_buffer_desc(__pdesc, __cpu, __trace_pdesc) \
+ for (__pdesc = __first_ring_buffer_desc(__trace_pdesc), __cpu = 0; \
+ (__cpu) < (__trace_pdesc)->nr_cpus; \
+ (__cpu)++, __pdesc = __next_ring_buffer_desc(__pdesc))
+
+struct ring_buffer_remote {
+ struct trace_buffer_desc *desc;
+ int (*swap_reader_page)(unsigned int cpu, void *priv);
+ int (*reset)(unsigned int cpu, void *priv);
+ void *priv;
+};
+
+int ring_buffer_poll_remote(struct trace_buffer *buffer, int cpu);
+
+struct trace_buffer *
+__ring_buffer_alloc_remote(struct ring_buffer_remote *remote,
+ struct lock_class_key *key);
+
+#define ring_buffer_alloc_remote(remote) \
+({ \
+ static struct lock_class_key __key; \
+ __ring_buffer_alloc_remote(remote, &__key); \
+})
#endif /* _LINUX_RING_BUFFER_H */
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index e5846f8f3c07..54f3f47b392b 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -523,6 +523,8 @@ struct ring_buffer_per_cpu {
struct trace_buffer_meta *meta_page;
struct ring_buffer_cpu_meta *ring_meta;
+ struct ring_buffer_remote *remote;
+
/* ring buffer pages to update, > 0 to add, < 0 to remove */
long nr_pages_to_update;
struct list_head new_pages; /* new pages to add */
@@ -545,6 +547,8 @@ struct trace_buffer {
struct ring_buffer_per_cpu **buffers;
+ struct ring_buffer_remote *remote;
+
struct hlist_node node;
u64 (*clock)(void);
@@ -2197,6 +2201,40 @@ static void rb_meta_buffer_update(struct ring_buffer_per_cpu *cpu_buffer,
}
}
+static struct ring_buffer_desc *ring_buffer_desc(struct trace_buffer_desc *trace_desc, int cpu)
+{
+ struct ring_buffer_desc *desc, *end;
+ size_t len;
+ int i;
+
+ if (!trace_desc)
+ return NULL;
+
+ if (cpu >= trace_desc->nr_cpus)
+ return NULL;
+
+ end = (struct ring_buffer_desc *)((void *)trace_desc + trace_desc->struct_len);
+ desc = __first_ring_buffer_desc(trace_desc);
+ len = struct_size(desc, page_va, desc->nr_page_va);
+ desc = (struct ring_buffer_desc *)((void *)desc + (len * cpu));
+
+ if (desc < end && desc->cpu == cpu)
+ return desc;
+
+ /* Missing CPUs, need to linear search */
+ for_each_ring_buffer_desc(desc, i, trace_desc) {
+ if (desc->cpu == cpu)
+ return desc;
+ }
+
+ return NULL;
+}
+
+static void *ring_buffer_desc_page(struct ring_buffer_desc *desc, int page_id)
+{
+ return page_id >= desc->nr_page_va ? NULL : (void *)desc->page_va[page_id];
+}
+
static int __rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
long nr_pages, struct list_head *pages)
{
@@ -2204,6 +2242,7 @@ static int __rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
struct ring_buffer_cpu_meta *meta = NULL;
struct buffer_page *bpage, *tmp;
bool user_thread = current->mm != NULL;
+ struct ring_buffer_desc *desc = NULL;
gfp_t mflags;
long i;
@@ -2240,6 +2279,12 @@ static int __rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
if (buffer->range_addr_start)
meta = rb_range_meta(buffer, nr_pages, cpu_buffer->cpu);
+ if (buffer->remote) {
+ desc = ring_buffer_desc(buffer->remote->desc, cpu_buffer->cpu);
+ if (!desc || WARN_ON(desc->nr_page_va != (nr_pages + 1)))
+ return -EINVAL;
+ }
+
for (i = 0; i < nr_pages; i++) {
struct page *page;
@@ -2266,6 +2311,16 @@ static int __rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
rb_meta_buffer_update(cpu_buffer, bpage);
bpage->range = 1;
bpage->id = i + 1;
+ } else if (desc) {
+ void *p = ring_buffer_desc_page(desc, i + 1);
+
+ if (WARN_ON(!p))
+ goto free_pages;
+
+ bpage->page = p;
+ bpage->range = 1; /* bpage->page can't be freed */
+ bpage->id = i + 1;
+ cpu_buffer->subbuf_ids[i + 1] = bpage;
} else {
page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu),
mflags | __GFP_COMP | __GFP_ZERO,
@@ -2369,6 +2424,30 @@ rb_allocate_cpu_buffer(struct trace_buffer *buffer, long nr_pages, int cpu)
if (cpu_buffer->ring_meta->head_buffer)
rb_meta_buffer_update(cpu_buffer, bpage);
bpage->range = 1;
+ } else if (buffer->remote) {
+ struct ring_buffer_desc *desc = ring_buffer_desc(buffer->remote->desc, cpu);
+
+ if (!desc)
+ goto fail_free_reader;
+
+ cpu_buffer->remote = buffer->remote;
+ cpu_buffer->meta_page = (struct trace_buffer_meta *)(void *)desc->meta_va;
+ cpu_buffer->nr_pages = nr_pages;
+ cpu_buffer->subbuf_ids = kcalloc(cpu_buffer->nr_pages + 1,
+ sizeof(*cpu_buffer->subbuf_ids), GFP_KERNEL);
+ if (!cpu_buffer->subbuf_ids)
+ goto fail_free_reader;
+
+ /* Remote buffers are read-only and immutable */
+ atomic_inc(&cpu_buffer->record_disabled);
+ atomic_inc(&cpu_buffer->resize_disabled);
+
+ bpage->page = ring_buffer_desc_page(desc, cpu_buffer->meta_page->reader.id);
+ if (!bpage->page)
+ goto fail_free_reader;
+
+ bpage->range = 1;
+ cpu_buffer->subbuf_ids[0] = bpage;
} else {
page = alloc_pages_node(cpu_to_node(cpu),
GFP_KERNEL | __GFP_COMP | __GFP_ZERO,
@@ -2431,6 +2510,9 @@ static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
irq_work_sync(&cpu_buffer->irq_work.work);
+ if (cpu_buffer->remote)
+ kfree(cpu_buffer->subbuf_ids);
+
free_buffer_page(cpu_buffer->reader_page);
if (head) {
@@ -2453,7 +2535,8 @@ static struct trace_buffer *alloc_buffer(unsigned long size, unsigned flags,
int order, unsigned long start,
unsigned long end,
unsigned long scratch_size,
- struct lock_class_key *key)
+ struct lock_class_key *key,
+ struct ring_buffer_remote *remote)
{
struct trace_buffer *buffer __free(kfree) = NULL;
long nr_pages;
@@ -2493,6 +2576,8 @@ static struct trace_buffer *alloc_buffer(unsigned long size, unsigned flags,
if (!buffer->buffers)
goto fail_free_cpumask;
+ cpu = raw_smp_processor_id();
+
/* If start/end are specified, then that overrides size */
if (start && end) {
unsigned long buffers_start;
@@ -2548,6 +2633,15 @@ static struct trace_buffer *alloc_buffer(unsigned long size, unsigned flags,
buffer->range_addr_end = end;
rb_range_meta_init(buffer, nr_pages, scratch_size);
+ } else if (remote) {
+ struct ring_buffer_desc *desc = ring_buffer_desc(remote->desc, cpu);
+
+ buffer->remote = remote;
+ /* The writer is remote. This ring-buffer is read-only */
+ atomic_inc(&buffer->record_disabled);
+ nr_pages = desc->nr_page_va - 1;
+ if (nr_pages < 2)
+ goto fail_free_buffers;
} else {
/* need at least two pages */
@@ -2556,7 +2650,6 @@ static struct trace_buffer *alloc_buffer(unsigned long size, unsigned flags,
nr_pages = 2;
}
- cpu = raw_smp_processor_id();
cpumask_set_cpu(cpu, buffer->cpumask);
buffer->buffers[cpu] = rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
if (!buffer->buffers[cpu])
@@ -2598,7 +2691,7 @@ struct trace_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
struct lock_class_key *key)
{
/* Default buffer page size - one system page */
- return alloc_buffer(size, flags, 0, 0, 0, 0, key);
+ return alloc_buffer(size, flags, 0, 0, 0, 0, key, NULL);
}
EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
@@ -2625,7 +2718,18 @@ struct trace_buffer *__ring_buffer_alloc_range(unsigned long size, unsigned flag
struct lock_class_key *key)
{
return alloc_buffer(size, flags, order, start, start + range_size,
- scratch_size, key);
+ scratch_size, key, NULL);
+}
+
+/**
+ * __ring_buffer_alloc_remote - allocate a new ring_buffer from a remote
+ * @remote: Contains a description of the ring-buffer pages and remote callbacks.
+ * @key: ring buffer reader_lock_key.
+ */
+struct trace_buffer *__ring_buffer_alloc_remote(struct ring_buffer_remote *remote,
+ struct lock_class_key *key)
+{
+ return alloc_buffer(0, 0, 0, 0, 0, 0, key, remote);
}
void *ring_buffer_meta_scratch(struct trace_buffer *buffer, unsigned int *size)
@@ -5233,6 +5337,16 @@ unsigned long ring_buffer_overruns(struct trace_buffer *buffer)
}
EXPORT_SYMBOL_GPL(ring_buffer_overruns);
+static bool rb_read_remote_meta_page(struct ring_buffer_per_cpu *cpu_buffer)
+{
+ local_set(&cpu_buffer->entries, READ_ONCE(cpu_buffer->meta_page->entries));
+ local_set(&cpu_buffer->overrun, READ_ONCE(cpu_buffer->meta_page->overrun));
+ local_set(&cpu_buffer->pages_touched, READ_ONCE(cpu_buffer->meta_page->pages_touched));
+ local_set(&cpu_buffer->pages_lost, READ_ONCE(cpu_buffer->meta_page->pages_lost));
+
+ return rb_num_of_entries(cpu_buffer);
+}
+
static void rb_iter_reset(struct ring_buffer_iter *iter)
{
struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
@@ -5387,7 +5501,43 @@ rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
}
static struct buffer_page *
-rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
+__rb_get_reader_page_from_remote(struct ring_buffer_per_cpu *cpu_buffer)
+{
+ struct buffer_page *new_reader, *prev_reader;
+
+ if (!rb_read_remote_meta_page(cpu_buffer))
+ return NULL;
+
+ /* More to read on the reader page */
+ if (cpu_buffer->reader_page->read < rb_page_size(cpu_buffer->reader_page)) {
+ if (!cpu_buffer->reader_page->read)
+ cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
+ return cpu_buffer->reader_page;
+ }
+
+ prev_reader = cpu_buffer->subbuf_ids[cpu_buffer->meta_page->reader.id];
+
+ WARN_ON_ONCE(cpu_buffer->remote->swap_reader_page(cpu_buffer->cpu,
+ cpu_buffer->remote->priv));
+ /* nr_pages doesn't include the reader page */
+ if (WARN_ON_ONCE(cpu_buffer->meta_page->reader.id > cpu_buffer->nr_pages))
+ return NULL;
+
+ new_reader = cpu_buffer->subbuf_ids[cpu_buffer->meta_page->reader.id];
+
+ WARN_ON_ONCE(prev_reader == new_reader);
+
+ cpu_buffer->reader_page->page = new_reader->page;
+ cpu_buffer->reader_page->id = new_reader->id;
+ cpu_buffer->reader_page->read = 0;
+ cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
+ cpu_buffer->lost_events = cpu_buffer->meta_page->reader.lost_events;
+
+ return rb_page_size(cpu_buffer->reader_page) ? cpu_buffer->reader_page : NULL;
+}
+
+static struct buffer_page *
+__rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
{
struct buffer_page *reader = NULL;
unsigned long bsize = READ_ONCE(cpu_buffer->buffer->subbuf_size);
@@ -5557,6 +5707,13 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
return reader;
}
+static struct buffer_page *
+rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
+{
+ return cpu_buffer->remote ? __rb_get_reader_page_from_remote(cpu_buffer) :
+ __rb_get_reader_page(cpu_buffer);
+}
+
static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
{
struct ring_buffer_event *event;
@@ -5957,7 +6114,7 @@ ring_buffer_read_start(struct trace_buffer *buffer, int cpu, gfp_t flags)
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_iter *iter;
- if (!cpumask_test_cpu(cpu, buffer->cpumask))
+ if (!cpumask_test_cpu(cpu, buffer->cpumask) || buffer->remote)
return NULL;
iter = kzalloc(sizeof(*iter), flags);
@@ -6125,6 +6282,23 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
{
struct buffer_page *page;
+ if (cpu_buffer->remote) {
+ if (!cpu_buffer->remote->reset)
+ return;
+
+ cpu_buffer->remote->reset(cpu_buffer->cpu, cpu_buffer->remote->priv);
+ rb_read_remote_meta_page(cpu_buffer);
+
+ /* Read related values, not covered by the meta-page */
+ local_set(&cpu_buffer->pages_read, 0);
+ cpu_buffer->read = 0;
+ cpu_buffer->read_bytes = 0;
+ cpu_buffer->last_overrun = 0;
+ cpu_buffer->reader_page->read = 0;
+
+ return;
+ }
+
rb_head_page_deactivate(cpu_buffer);
cpu_buffer->head_page
@@ -6355,6 +6529,48 @@ bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu)
}
EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
+int ring_buffer_poll_remote(struct trace_buffer *buffer, int cpu)
+{
+ struct ring_buffer_per_cpu *cpu_buffer;
+
+ if (cpu != RING_BUFFER_ALL_CPUS) {
+ if (!cpumask_test_cpu(cpu, buffer->cpumask))
+ return -EINVAL;
+
+ cpu_buffer = buffer->buffers[cpu];
+
+ guard(raw_spinlock)(&cpu_buffer->reader_lock);
+ if (rb_read_remote_meta_page(cpu_buffer))
+ rb_wakeups(buffer, cpu_buffer);
+
+ return 0;
+ }
+
+ cpus_read_lock();
+
+ /*
+ * Make sure all the ring buffers are up to date before we start reading
+ * them.
+ */
+ for_each_buffer_cpu(buffer, cpu) {
+ cpu_buffer = buffer->buffers[cpu];
+
+ guard(raw_spinlock)(&cpu_buffer->reader_lock);
+ rb_read_remote_meta_page(cpu_buffer);
+ }
+
+ for_each_buffer_cpu(buffer, cpu) {
+ cpu_buffer = buffer->buffers[cpu];
+
+ if (rb_num_of_entries(cpu_buffer))
+ rb_wakeups(buffer, cpu_buffer);
+ }
+
+ cpus_read_unlock();
+
+ return 0;
+}
+
#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
/**
* ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
@@ -6600,6 +6816,7 @@ int ring_buffer_read_page(struct trace_buffer *buffer,
unsigned int commit;
unsigned int read;
u64 save_timestamp;
+ bool force_memcpy;
if (!cpumask_test_cpu(cpu, buffer->cpumask))
return -1;
@@ -6637,6 +6854,8 @@ int ring_buffer_read_page(struct trace_buffer *buffer,
/* Check if any events were dropped */
missed_events = cpu_buffer->lost_events;
+ force_memcpy = cpu_buffer->mapped || cpu_buffer->remote;
+
/*
* If this page has been partially read or
* if len is not big enough to read the rest of the page or
@@ -6646,7 +6865,7 @@ int ring_buffer_read_page(struct trace_buffer *buffer,
*/
if (read || (len < (commit - read)) ||
cpu_buffer->reader_page == cpu_buffer->commit_page ||
- cpu_buffer->mapped) {
+ force_memcpy) {
struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
unsigned int rpos = read;
unsigned int pos = 0;
@@ -7225,7 +7444,7 @@ int ring_buffer_map(struct trace_buffer *buffer, int cpu,
unsigned long flags;
int err;
- if (!cpumask_test_cpu(cpu, buffer->cpumask))
+ if (!cpumask_test_cpu(cpu, buffer->cpumask) || buffer->remote)
return -EINVAL;
cpu_buffer = buffer->buffers[cpu];
--
2.51.0.618.g983fd99d29-goog
* [PATCH v7 04/28] ring-buffer: Add non-consuming read for ring-buffer remotes
From: Vincent Donnefort @ 2025-10-03 13:38 UTC
To: rostedt, mhiramat, mathieu.desnoyers, linux-trace-kernel, maz,
oliver.upton, joey.gouly, suzuki.poulose, yuzenghui
Cc: kvmarm, linux-arm-kernel, jstultz, qperret, will, aneesh.kumar,
kernel-team, linux-kernel, Vincent Donnefort
The remote is expected to swap pages only at the kernel's request (via
the swap_reader_page() callback). This means the kernel knows at what
point the ring-buffer geometry has changed, and it is therefore possible
to rearrange the kernel view of that ring-buffer to allow non-consuming
reads.
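The generic iterator API can then walk a remote buffer without
consuming it, roughly as below (a sketch; buffer and cpu are assumed to
come from a remote allocation as per the previous patch):

    struct ring_buffer_iter *rb_iter;
    struct ring_buffer_event *event;
    u64 ts;

    rb_iter = ring_buffer_read_start(buffer, cpu, GFP_KERNEL);
    if (rb_iter) {
        while ((event = ring_buffer_iter_peek(rb_iter, &ts))) {
            /* Decode the event without consuming it */
            ring_buffer_iter_advance(rb_iter);
        }
        ring_buffer_read_finish(rb_iter);
    }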
Signed-off-by: Vincent Donnefort <vdonnefort@google.com>
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 54f3f47b392b..eb556e691b5e 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -5347,10 +5347,51 @@ static bool rb_read_remote_meta_page(struct ring_buffer_per_cpu *cpu_buffer)
return rb_num_of_entries(cpu_buffer);
}
+static void rb_update_remote_head(struct ring_buffer_per_cpu *cpu_buffer)
+{
+ struct buffer_page *next, *orig;
+ int retry = 3;
+
+ orig = next = cpu_buffer->head_page;
+ rb_inc_page(&next);
+
+ /* Run after the writer */
+ while (cpu_buffer->head_page->page->time_stamp > next->page->time_stamp) {
+ rb_inc_page(&next);
+
+ rb_list_head_clear(cpu_buffer->head_page->list.prev);
+ rb_inc_page(&cpu_buffer->head_page);
+ rb_set_list_to_head(cpu_buffer->head_page->list.prev);
+
+ if (cpu_buffer->head_page == orig) {
+ if (WARN_ON_ONCE(!(--retry)))
+ return;
+ }
+ }
+
+ orig = cpu_buffer->commit_page = cpu_buffer->head_page;
+ retry = 3;
+
+ while (cpu_buffer->commit_page->page->time_stamp < next->page->time_stamp) {
+ rb_inc_page(&next);
+ rb_inc_page(&cpu_buffer->commit_page);
+
+ if (cpu_buffer->commit_page == orig) {
+ if (WARN_ON_ONCE(!(--retry)))
+ return;
+ }
+ }
+}
+
static void rb_iter_reset(struct ring_buffer_iter *iter)
{
struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
+ if (cpu_buffer->remote) {
+ rb_read_remote_meta_page(cpu_buffer);
+ rb_update_remote_head(cpu_buffer);
+ }
+
/* Iterator usage is expected to have record disabled */
iter->head_page = cpu_buffer->reader_page;
iter->head = cpu_buffer->reader_page->read;
@@ -5503,7 +5544,7 @@ rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
static struct buffer_page *
__rb_get_reader_page_from_remote(struct ring_buffer_per_cpu *cpu_buffer)
{
- struct buffer_page *new_reader, *prev_reader;
+ struct buffer_page *new_reader, *prev_reader, *prev_head, *new_head, *last;
if (!rb_read_remote_meta_page(cpu_buffer))
return NULL;
@@ -5527,10 +5568,32 @@ __rb_get_reader_page_from_remote(struct ring_buffer_per_cpu *cpu_buffer)
WARN_ON_ONCE(prev_reader == new_reader);
- cpu_buffer->reader_page->page = new_reader->page;
- cpu_buffer->reader_page->id = new_reader->id;
- cpu_buffer->reader_page->read = 0;
- cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
+ prev_head = new_reader; /* New reader was also the previous head */
+ new_head = prev_head;
+ rb_inc_page(&new_head);
+ last = prev_head;
+ rb_dec_page(&last);
+
+ /* Clear the old HEAD flag */
+ rb_list_head_clear(cpu_buffer->head_page->list.prev);
+
+ prev_reader->list.next = prev_head->list.next;
+ prev_reader->list.prev = prev_head->list.prev;
+
+ /* Swap prev_reader with new_reader */
+ last->list.next = &prev_reader->list;
+ new_head->list.prev = &prev_reader->list;
+
+ new_reader->list.prev = &new_reader->list;
+ new_reader->list.next = &new_head->list;
+
+ /* Reactivate the HEAD flag */
+ rb_set_list_to_head(&last->list);
+
+ cpu_buffer->head_page = new_head;
+ cpu_buffer->reader_page = new_reader;
+ cpu_buffer->pages = &new_head->list;
+ cpu_buffer->read_stamp = new_reader->page->time_stamp;
cpu_buffer->lost_events = cpu_buffer->meta_page->reader.lost_events;
return rb_page_size(cpu_buffer->reader_page) ? cpu_buffer->reader_page : NULL;
@@ -6114,7 +6177,7 @@ ring_buffer_read_start(struct trace_buffer *buffer, int cpu, gfp_t flags)
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_iter *iter;
- if (!cpumask_test_cpu(cpu, buffer->cpumask) || buffer->remote)
+ if (!cpumask_test_cpu(cpu, buffer->cpumask))
return NULL;
iter = kzalloc(sizeof(*iter), flags);
--
2.51.0.618.g983fd99d29-goog
* [PATCH v7 05/28] tracing: Introduce trace remotes
From: Vincent Donnefort @ 2025-10-03 13:38 UTC
To: rostedt, mhiramat, mathieu.desnoyers, linux-trace-kernel, maz,
oliver.upton, joey.gouly, suzuki.poulose, yuzenghui
Cc: kvmarm, linux-arm-kernel, jstultz, qperret, will, aneesh.kumar,
kernel-team, linux-kernel, Vincent Donnefort
A trace remote relies on ring-buffer remotes to read and control
compatible tracing buffers written by an entity such as firmware or a
hypervisor.
Add a Tracefs directory remotes/ that contains all instances of trace
remotes. Each instance follows the same hierarchy as the regular tracing
instances to ease support by existing user-space tools.
This does not provide any event support yet; that will come later.
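A backend would typically plug into this interface along these lines (a
hedged sketch: my_unload(), my_enable() and my_swap_reader_page() are
hypothetical callbacks, while trace_remote_register(),
trace_remote_alloc_buffer() and trace_buffer_desc_size() come from this
series):

    static struct trace_buffer_desc *my_load(unsigned long size, void *priv)
    {
        size_t desc_size = trace_buffer_desc_size(size, num_possible_cpus());
        struct trace_buffer_desc *desc = kzalloc(desc_size, GFP_KERNEL);

        if (!desc)
            return ERR_PTR(-ENOMEM);

        if (trace_remote_alloc_buffer(desc, desc_size, size,
                                      cpu_possible_mask)) {
            kfree(desc);
            return ERR_PTR(-ENOMEM);
        }

        return desc;
    }

    static struct trace_remote_callbacks my_cbs = {
        .load_trace_buffer   = my_load,
        .unload_trace_buffer = my_unload,
        .enable_tracing      = my_enable,
        .swap_reader_page    = my_swap_reader_page,
    };

    /* Creates the Tracefs hierarchy under remotes/my_remote/ */
    trace_remote_register("my_remote", &my_cbs, NULL);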
Signed-off-by: Vincent Donnefort <vdonnefort@google.com>
diff --git a/include/linux/trace_remote.h b/include/linux/trace_remote.h
new file mode 100644
index 000000000000..feb3433c2128
--- /dev/null
+++ b/include/linux/trace_remote.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _LINUX_TRACE_REMOTE_H
+#define _LINUX_TRACE_REMOTE_H
+
+#include <linux/ring_buffer.h>
+
+/**
+ * struct trace_remote_callbacks - Callbacks used by Tracefs to control the remote
+ *
+ * @load_trace_buffer: Called before Tracefs accesses the trace buffer for the first
+ * time. Must return a &trace_buffer_desc
+ * (most likely filled with trace_remote_alloc_buffer())
+ * @unload_trace_buffer:
+ * Called once Tracefs has no use for the trace buffer
+ * (most likely call trace_remote_free_buffer())
+ * @enable_tracing: Called on Tracefs tracing_on. It is expected from the
+ * remote to allow writing.
+ * @swap_reader_page: Called when Tracefs consumes a new page from a
+ * ring-buffer. It is expected from the remote to isolate a
+ * new reader-page from the @cpu ring-buffer.
+ */
+struct trace_remote_callbacks {
+ struct trace_buffer_desc *(*load_trace_buffer)(unsigned long size, void *priv);
+ void (*unload_trace_buffer)(struct trace_buffer_desc *desc, void *priv);
+ int (*enable_tracing)(bool enable, void *priv);
+ int (*swap_reader_page)(unsigned int cpu, void *priv);
+};
+
+/**
+ * trace_remote_register() - Register a Tracefs remote
+ *
+ * A trace remote is an entity, outside of the kernel (most likely firmware or
+ * hypervisor) capable of writing events into a Tracefs compatible ring-buffer.
+ * The kernel would then act as a reader.
+ *
+ * The registered remote will be found under the Tracefs directory
+ * remotes/<name>.
+ *
+ * @name: Name of the remote, used for the Tracefs remotes/ directory.
+ * @cbs: Set of callbacks used to control the remote.
+ * @priv: Private data, passed to each callback from @cbs.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int trace_remote_register(const char *name, struct trace_remote_callbacks *cbs, void *priv);
+
+/**
+ * trace_remote_alloc_buffer() - Dynamically allocate a trace buffer
+ *
+ * Helper to dynamically allocate a set of pages (enough to cover @buffer_size)
+ * for each CPU from @cpumask and fill @desc. Most likely called from
+ * &trace_remote_callbacks.load_trace_buffer.
+ *
+ * @desc: Uninitialized trace_buffer_desc
+ * @desc_size: Size of the trace_buffer_desc. Must be at least equal to
+ * trace_buffer_desc_size()
+ * @buffer_size: Size in bytes of each per-CPU ring-buffer
+ * @cpumask: CPUs to allocate a ring-buffer for
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int trace_remote_alloc_buffer(struct trace_buffer_desc *desc, size_t desc_size, size_t buffer_size,
+ const struct cpumask *cpumask);
+
+/**
+ * trace_remote_free_buffer() - Free trace buffer allocated with
+ * trace_remote_alloc_buffer()
+ *
+ * Most likely called from &trace_remote_callbacks.unload_trace_buffer.
+ *
+ * @desc: Descriptor of the per-CPU ring-buffers, originally filled by
+ * trace_remote_alloc_buffer()
+ */
+void trace_remote_free_buffer(struct trace_buffer_desc *desc);
+
+#endif
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index d2c79da81e4f..99af56d39eaf 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -1238,4 +1238,7 @@ config HIST_TRIGGERS_DEBUG
source "kernel/trace/rv/Kconfig"
+config TRACE_REMOTE
+ bool
+
endif # FTRACE
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index dcb4e02afc5f..6dab341acc46 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -110,4 +110,5 @@ obj-$(CONFIG_FPROBE_EVENTS) += trace_fprobe.o
obj-$(CONFIG_TRACEPOINT_BENCHMARK) += trace_benchmark.o
obj-$(CONFIG_RV) += rv/
+obj-$(CONFIG_TRACE_REMOTE) += trace_remote.o
libftrace-y := ftrace.o
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index b3c94fbaf002..ec0cdbe96766 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -8949,7 +8949,7 @@ static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
return tr->percpu_dir;
}
-static struct dentry *
+struct dentry *
trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
void *data, long cpu, const struct file_operations *fops)
{
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 5f4bed5842f9..a3386cf0c760 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -672,6 +672,12 @@ struct dentry *trace_create_file(const char *name,
struct dentry *parent,
void *data,
const struct file_operations *fops);
+struct dentry *trace_create_cpu_file(const char *name,
+ umode_t mode,
+ struct dentry *parent,
+ void *data,
+ long cpu,
+ const struct file_operations *fops);
/**
diff --git a/kernel/trace/trace_remote.c b/kernel/trace/trace_remote.c
new file mode 100644
index 000000000000..3d76a4d7d5f5
--- /dev/null
+++ b/kernel/trace/trace_remote.c
@@ -0,0 +1,568 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2025 - Google LLC
+ * Author: Vincent Donnefort <vdonnefort@google.com>
+ */
+
+#include <linux/kstrtox.h>
+#include <linux/lockdep.h>
+#include <linux/mutex.h>
+#include <linux/tracefs.h>
+#include <linux/trace_remote.h>
+#include <linux/trace_seq.h>
+#include <linux/types.h>
+
+#include "trace.h"
+
+#define TRACEFS_DIR "remotes"
+#define TRACEFS_MODE_WRITE 0640
+#define TRACEFS_MODE_READ 0440
+
+struct trace_remote_iterator {
+ struct trace_remote *remote;
+ struct trace_seq seq;
+ struct delayed_work poll_work;
+ unsigned long lost_events;
+ u64 ts;
+ int cpu;
+ int evt_cpu;
+};
+
+struct trace_remote {
+ struct trace_remote_callbacks *cbs;
+ void *priv;
+ struct trace_buffer *trace_buffer;
+ struct trace_buffer_desc *trace_buffer_desc;
+ unsigned long trace_buffer_size;
+ struct ring_buffer_remote rb_remote;
+ struct mutex lock;
+ unsigned int nr_readers;
+ unsigned int poll_ms;
+ bool tracing_on;
+};
+
+static bool trace_remote_loaded(struct trace_remote *remote)
+{
+ return remote->trace_buffer;
+}
+
+static int trace_remote_load(struct trace_remote *remote)
+{
+ struct ring_buffer_remote *rb_remote = &remote->rb_remote;
+
+ lockdep_assert_held(&remote->lock);
+
+ if (trace_remote_loaded(remote))
+ return 0;
+
+ remote->trace_buffer_desc = remote->cbs->load_trace_buffer(remote->trace_buffer_size,
+ remote->priv);
+ if (IS_ERR(remote->trace_buffer_desc))
+ return PTR_ERR(remote->trace_buffer_desc);
+
+ rb_remote->desc = remote->trace_buffer_desc;
+ rb_remote->swap_reader_page = remote->cbs->swap_reader_page;
+ rb_remote->priv = remote->priv;
+ remote->trace_buffer = ring_buffer_alloc_remote(rb_remote);
+ if (!remote->trace_buffer) {
+ remote->cbs->unload_trace_buffer(remote->trace_buffer_desc, remote->priv);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void trace_remote_try_unload(struct trace_remote *remote)
+{
+ lockdep_assert_held(&remote->lock);
+
+ if (!trace_remote_loaded(remote))
+ return;
+
+ /* The buffer is being read or writable */
+ if (remote->nr_readers || remote->tracing_on)
+ return;
+
+ /* The buffer has readable data */
+ if (!ring_buffer_empty(remote->trace_buffer))
+ return;
+
+ ring_buffer_free(remote->trace_buffer);
+ remote->trace_buffer = NULL;
+ remote->cbs->unload_trace_buffer(remote->trace_buffer_desc, remote->priv);
+}
+
+static int trace_remote_enable_tracing(struct trace_remote *remote)
+{
+ int ret;
+
+ lockdep_assert_held(&remote->lock);
+
+ if (remote->tracing_on)
+ return 0;
+
+ ret = trace_remote_load(remote);
+ if (ret)
+ return ret;
+
+ ret = remote->cbs->enable_tracing(true, remote->priv);
+ if (ret) {
+ trace_remote_try_unload(remote);
+ return ret;
+ }
+
+ remote->tracing_on = true;
+
+ return 0;
+}
+
+static int trace_remote_disable_tracing(struct trace_remote *remote)
+{
+ int ret;
+
+ lockdep_assert_held(&remote->lock);
+
+ if (!remote->tracing_on)
+ return 0;
+
+ ret = remote->cbs->enable_tracing(false, remote->priv);
+ if (ret)
+ return ret;
+
+ ring_buffer_poll_remote(remote->trace_buffer, RING_BUFFER_ALL_CPUS);
+ remote->tracing_on = false;
+ trace_remote_try_unload(remote);
+
+ return 0;
+}
+
+static ssize_t
+tracing_on_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ struct trace_remote *remote = filp->private_data;
+ unsigned long val;
+ int ret;
+
+ ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
+ if (ret)
+ return ret;
+
+ guard(mutex)(&remote->lock);
+
+ ret = val ? trace_remote_enable_tracing(remote) : trace_remote_disable_tracing(remote);
+ if (ret)
+ return ret;
+
+ return cnt;
+}
+
+static int tracing_on_show(struct seq_file *s, void *unused)
+{
+ struct trace_remote *remote = s->private;
+
+ seq_printf(s, "%d\n", remote->tracing_on);
+
+ return 0;
+}
+DEFINE_SHOW_STORE_ATTRIBUTE(tracing_on);
+
+static ssize_t buffer_size_kb_write(struct file *filp, const char __user *ubuf, size_t cnt,
+ loff_t *ppos)
+{
+ struct trace_remote *remote = filp->private_data;
+ unsigned long val;
+ int ret;
+
+ ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
+ if (ret)
+ return ret;
+
+ /* KiB to Bytes */
+ if (!val || check_shl_overflow(val, 10, &val))
+ return -EINVAL;
+
+ guard(mutex)(&remote->lock);
+
+ remote->trace_buffer_size = val;
+
+ return cnt;
+}
+
+static int buffer_size_kb_show(struct seq_file *s, void *unused)
+{
+ struct trace_remote *remote = s->private;
+
+ seq_printf(s, "%lu (%s)\n", remote->trace_buffer_size >> 10,
+ trace_remote_loaded(remote) ? "loaded" : "unloaded");
+
+ return 0;
+}
+DEFINE_SHOW_STORE_ATTRIBUTE(buffer_size_kb);
+
+static int trace_remote_get(struct trace_remote *remote, int cpu)
+{
+ int ret;
+
+ if (remote->nr_readers == UINT_MAX)
+ return -EBUSY;
+
+ ret = trace_remote_load(remote);
+ if (ret)
+ return ret;
+
+ remote->nr_readers++;
+
+ return 0;
+}
+
+static void trace_remote_put(struct trace_remote *remote)
+{
+ if (WARN_ON(!remote->nr_readers))
+ return;
+
+ remote->nr_readers--;
+ if (remote->nr_readers)
+ return;
+
+ trace_remote_try_unload(remote);
+}
+
+static void __poll_remote(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct trace_remote_iterator *iter;
+
+ iter = container_of(dwork, struct trace_remote_iterator, poll_work);
+ ring_buffer_poll_remote(iter->remote->trace_buffer, iter->cpu);
+ schedule_delayed_work((struct delayed_work *)work,
+ msecs_to_jiffies(iter->remote->poll_ms));
+}
+
+static struct trace_remote_iterator
+*trace_remote_iter(struct trace_remote *remote, int cpu)
+{
+ struct trace_remote_iterator *iter = NULL;
+ int ret;
+
+ lockdep_assert_held(&remote->lock);
+
+ ret = trace_remote_get(remote, cpu);
+ if (ret)
+ return ERR_PTR(ret);
+
+ /* Test the CPU */
+ ret = ring_buffer_poll_remote(remote->trace_buffer, cpu);
+ if (ret)
+ goto err;
+
+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
+ if (iter) {
+ iter->remote = remote;
+ iter->cpu = cpu;
+ trace_seq_init(&iter->seq);
+ INIT_DELAYED_WORK(&iter->poll_work, __poll_remote);
+ schedule_delayed_work(&iter->poll_work, msecs_to_jiffies(remote->poll_ms));
+
+ return iter;
+ }
+ ret = -ENOMEM;
+
+err:
+ kfree(iter);
+ trace_remote_put(remote);
+
+ return ERR_PTR(ret);
+}
+
+static void trace_remote_iter_free(struct trace_remote_iterator *iter)
+{
+ struct trace_remote *remote;
+
+ if (!iter)
+ return;
+
+ remote = iter->remote;
+
+ lockdep_assert_held(&remote->lock);
+
+ kfree(iter);
+ trace_remote_put(remote);
+}
+
+static bool trace_remote_iter_read_event(struct trace_remote_iterator *iter)
+{
+ struct trace_buffer *trace_buffer = iter->remote->trace_buffer;
+ int cpu = iter->cpu;
+
+ if (cpu != RING_BUFFER_ALL_CPUS) {
+ if (ring_buffer_empty_cpu(trace_buffer, cpu))
+ return false;
+
+ if (!ring_buffer_peek(trace_buffer, cpu, &iter->ts, &iter->lost_events))
+ return false;
+
+ iter->evt_cpu = cpu;
+ return true;
+ }
+
+ iter->ts = U64_MAX;
+ for_each_possible_cpu(cpu) {
+ unsigned long lost_events;
+ u64 ts;
+
+ if (ring_buffer_empty_cpu(trace_buffer, cpu))
+ continue;
+
+ if (!ring_buffer_peek(trace_buffer, cpu, &ts, &lost_events))
+ continue;
+
+ if (ts >= iter->ts)
+ continue;
+
+ iter->ts = ts;
+ iter->evt_cpu = cpu;
+ iter->lost_events = lost_events;
+ }
+
+ return iter->ts != U64_MAX;
+}
+
+static int trace_remote_iter_print(struct trace_remote_iterator *iter)
+{
+ unsigned long usecs_rem;
+ u64 ts = iter->ts;
+
+ if (iter->lost_events)
+ trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
+ iter->evt_cpu, iter->lost_events);
+
+ do_div(ts, 1000);
+ usecs_rem = do_div(ts, USEC_PER_SEC);
+
+ trace_seq_printf(&iter->seq, "[%03d]\t%5llu.%06lu: ", iter->evt_cpu,
+ ts, usecs_rem);
+
+ return trace_seq_has_overflowed(&iter->seq) ? -EOVERFLOW : 0;
+}
+
+static int trace_pipe_open(struct inode *inode, struct file *filp)
+{
+ struct trace_remote *remote = inode->i_private;
+ struct trace_remote_iterator *iter;
+ int cpu = RING_BUFFER_ALL_CPUS;
+
+ if (inode->i_cdev)
+ cpu = (long)inode->i_cdev - 1;
+
+ guard(mutex)(&remote->lock);
+ iter = trace_remote_iter(remote, cpu);
+ filp->private_data = iter;
+
+ return IS_ERR(iter) ? PTR_ERR(iter) : 0;
+}
+
+static int trace_pipe_release(struct inode *inode, struct file *filp)
+{
+ struct trace_remote_iterator *iter = filp->private_data;
+ struct trace_remote *remote = iter->remote;
+
+ guard(mutex)(&remote->lock);
+
+ trace_remote_iter_free(iter);
+
+ return 0;
+}
+
+static ssize_t trace_pipe_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ struct trace_remote_iterator *iter = filp->private_data;
+ struct trace_buffer *trace_buffer = iter->remote->trace_buffer;
+ int ret;
+
+copy_to_user:
+ ret = trace_seq_to_user(&iter->seq, ubuf, cnt);
+ if (ret != -EBUSY)
+ return ret;
+
+ trace_seq_init(&iter->seq);
+
+ ret = ring_buffer_wait(trace_buffer, iter->cpu, 0, NULL, NULL);
+ if (ret < 0)
+ return ret;
+
+ while (trace_remote_iter_next(iter)) {
+ while (trace_remote_iter_read_event(iter)) {
+ int prev_len = iter->seq.seq.len;
+
+ if (trace_remote_iter_print(iter)) {
+ iter->seq.seq.len = prev_len;
+ break;
+ }
+
+ ring_buffer_consume(trace_buffer, iter->evt_cpu, NULL, NULL);
+ }
+ }
+
+ goto copy_to_user;
+}
+
+static const struct file_operations trace_pipe_fops = {
+ .open = trace_pipe_open,
+ .read = trace_pipe_read,
+ .release = trace_pipe_release,
+};
+
+static int trace_remote_init_tracefs(const char *name, struct trace_remote *remote)
+{
+ struct dentry *remote_d, *percpu_d;
+ static struct dentry *root;
+ static DEFINE_MUTEX(lock);
+ bool root_inited = false;
+ int cpu;
+
+ guard(mutex)(&lock);
+
+ if (!root) {
+ root = tracefs_create_dir(TRACEFS_DIR, NULL);
+ if (!root) {
+ pr_err("Failed to create tracefs dir "TRACEFS_DIR"\n");
+ return -ENOMEM;
+ }
+ root_inited = true;
+ }
+
+ remote_d = tracefs_create_dir(name, root);
+ if (!remote_d) {
+ pr_err("Failed to create tracefs dir "TRACEFS_DIR"%s/\n", name);
+ goto err;
+ }
+
+ if (!trace_create_file("tracing_on", TRACEFS_MODE_WRITE, remote_d, remote,
+ &tracing_on_fops) ||
+ !trace_create_file("buffer_size_kb", TRACEFS_MODE_WRITE, remote_d, remote,
+ &buffer_size_kb_fops) ||
+ !trace_create_file("trace_pipe", TRACEFS_MODE_READ, remote_d, remote,
+ &trace_pipe_fops))
+ goto err;
+
+ percpu_d = tracefs_create_dir("per_cpu", remote_d);
+ if (!percpu_d) {
+ pr_err("Failed to create tracefs dir "TRACEFS_DIR"%s/per_cpu/\n", name);
+ goto err;
+ }
+
+ for_each_possible_cpu(cpu) {
+ struct dentry *cpu_d;
+ char cpu_name[16];
+
+ snprintf(cpu_name, sizeof(cpu_name), "cpu%d", cpu);
+ cpu_d = tracefs_create_dir(cpu_name, percpu_d);
+ if (!cpu_d) {
+ pr_err("Failed to create tracefs dir "TRACEFS_DIR"%s/percpu/cpu%d\n",
+ name, cpu);
+ goto err;
+ }
+
+ if (!trace_create_cpu_file("trace_pipe", TRACEFS_MODE_READ, cpu_d, remote, cpu,
+ &trace_pipe_fops))
+ goto err;
+ }
+
+ return 0;
+
+err:
+ if (root_inited) {
+ tracefs_remove(root);
+ root = NULL;
+ } else {
+ tracefs_remove(remote_d);
+ }
+
+ return -ENOMEM;
+}
+
+int trace_remote_register(const char *name, struct trace_remote_callbacks *cbs, void *priv)
+{
+ struct trace_remote *remote;
+
+ remote = kzalloc(sizeof(*remote), GFP_KERNEL);
+ if (!remote)
+ return -ENOMEM;
+
+ remote->cbs = cbs;
+ remote->priv = priv;
+ remote->trace_buffer_size = 7 << 10;
+ remote->poll_ms = 100;
+ mutex_init(&remote->lock);
+
+ if (trace_remote_init_tracefs(name, remote)) {
+ kfree(remote);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void trace_remote_free_buffer(struct trace_buffer_desc *desc)
+{
+ struct ring_buffer_desc *rb_desc;
+ int cpu;
+
+ for_each_ring_buffer_desc(rb_desc, cpu, desc) {
+ unsigned int id;
+
+ free_page(rb_desc->meta_va);
+
+ for (id = 0; id < rb_desc->nr_page_va; id++)
+ free_page(rb_desc->page_va[id]);
+ }
+}
+
+int trace_remote_alloc_buffer(struct trace_buffer_desc *desc, size_t desc_size, size_t buffer_size,
+ const struct cpumask *cpumask)
+{
+ unsigned int nr_pages = max(DIV_ROUND_UP(buffer_size, PAGE_SIZE), 2UL) + 1;
+ void *desc_end = (void *)desc + desc_size;
+ struct ring_buffer_desc *rb_desc;
+ int cpu, ret = -ENOMEM;
+
+ if (desc_size < struct_size(desc, __data, 0))
+ return -EINVAL;
+
+ desc->nr_cpus = 0;
+ desc->struct_len = struct_size(desc, __data, 0);
+
+ rb_desc = (struct ring_buffer_desc *)&desc->__data[0];
+
+ for_each_cpu(cpu, cpumask) {
+ unsigned int id;
+
+ if ((void *)rb_desc + struct_size(rb_desc, page_va, nr_pages) > desc_end) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ rb_desc->cpu = cpu;
+ rb_desc->nr_page_va = 0;
+ rb_desc->meta_va = (unsigned long)__get_free_page(GFP_KERNEL);
+ if (!rb_desc->meta_va)
+ goto err;
+
+ for (id = 0; id < nr_pages; id++) {
+ rb_desc->page_va[id] = (unsigned long)__get_free_page(GFP_KERNEL);
+ if (!rb_desc->page_va[id])
+ goto err;
+
+ rb_desc->nr_page_va++;
+ }
+ desc->nr_cpus++;
+ desc->struct_len += struct_size(rb_desc, page_va, rb_desc->nr_page_va);
+ rb_desc = __next_ring_buffer_desc(rb_desc);
+ }
+
+ return 0;
+
+err:
+ trace_remote_free_buffer(desc);
+ return ret;
+}
--
2.51.0.618.g983fd99d29-goog
* [PATCH v7 06/28] tracing: Add reset to trace remotes
From: Vincent Donnefort @ 2025-10-03 13:38 UTC
To: rostedt, mhiramat, mathieu.desnoyers, linux-trace-kernel, maz,
oliver.upton, joey.gouly, suzuki.poulose, yuzenghui
Cc: kvmarm, linux-arm-kernel, jstultz, qperret, will, aneesh.kumar,
kernel-team, linux-kernel, Vincent Donnefort
Allow resetting the trace remote buffer by writing to the Tracefs
"trace" file, mirroring the regular Tracefs interface.
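On the remote side, this maps onto the new reset callback (a
hypothetical sketch; my_hyp_reset_call() is an assumption standing in
for whatever mechanism reaches the remote):

    static int my_reset(unsigned int cpu, void *priv)
    {
        /* Ask the remote to reset the ring-buffer pages of @cpu */
        return my_hyp_reset_call(cpu);
    }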
Signed-off-by: Vincent Donnefort <vdonnefort@google.com>
diff --git a/include/linux/trace_remote.h b/include/linux/trace_remote.h
index feb3433c2128..55754ef78104 100644
--- a/include/linux/trace_remote.h
+++ b/include/linux/trace_remote.h
@@ -18,6 +18,8 @@
* remote to allow writing.
* @swap_reader_page: Called when Tracefs consumes a new page from a
* ring-buffer. It is expected from the remote to isolate a
* new reader-page from the @cpu ring-buffer.
+ * @reset: Called on `echo 0 > trace`. It is expected from the
+ * remote to reset all ring-buffer pages.
*/
struct trace_remote_callbacks {
@@ -25,6 +27,7 @@ struct trace_remote_callbacks {
void (*unload_trace_buffer)(struct trace_buffer_desc *desc, void *priv);
int (*enable_tracing)(bool enable, void *priv);
int (*swap_reader_page)(unsigned int cpu, void *priv);
+ int (*reset)(unsigned int cpu, void *priv);
};
/**
diff --git a/kernel/trace/trace_remote.c b/kernel/trace/trace_remote.c
index 3d76a4d7d5f5..abf83ff75df5 100644
--- a/kernel/trace/trace_remote.c
+++ b/kernel/trace/trace_remote.c
@@ -63,6 +63,7 @@ static int trace_remote_load(struct trace_remote *remote)
rb_remote->desc = remote->trace_buffer_desc;
rb_remote->swap_reader_page = remote->cbs->swap_reader_page;
rb_remote->priv = remote->priv;
+ rb_remote->reset = remote->cbs->reset;
remote->trace_buffer = ring_buffer_alloc_remote(rb_remote);
if (!remote->trace_buffer) {
remote->cbs->unload_trace_buffer(remote->trace_buffer_desc, remote->priv);
@@ -136,6 +137,21 @@ static int trace_remote_disable_tracing(struct trace_remote *remote)
return 0;
}
+static void trace_remote_reset(struct trace_remote *remote, int cpu)
+{
+ lockdep_assert_held(&remote->lock);
+
+ if (!trace_remote_loaded(remote))
+ return;
+
+ if (cpu == RING_BUFFER_ALL_CPUS)
+ ring_buffer_reset(remote->trace_buffer);
+ else
+ ring_buffer_reset_cpu(remote->trace_buffer, cpu);
+
+ trace_remote_try_unload(remote);
+}
+
static ssize_t
tracing_on_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
{
@@ -411,6 +427,26 @@ static const struct file_operations trace_pipe_fops = {
.release = trace_pipe_release,
};
+static ssize_t trace_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ struct inode *inode = file_inode(filp);
+ struct trace_remote *remote = inode->i_private;
+ int cpu = RING_BUFFER_ALL_CPUS;
+
+ if (inode->i_cdev)
+ cpu = (long)inode->i_cdev - 1;
+
+ guard(mutex)(&remote->lock);
+
+ trace_remote_reset(remote, cpu);
+
+ return cnt;
+}
+
+static const struct file_operations trace_fops = {
+ .write = trace_write,
+};
+
static int trace_remote_init_tracefs(const char *name, struct trace_remote *remote)
{
struct dentry *remote_d, *percpu_d;
@@ -441,7 +477,9 @@ static int trace_remote_init_tracefs(const char *name, struct trace_remote *remo
!trace_create_file("buffer_size_kb", TRACEFS_MODE_WRITE, remote_d, remote,
&buffer_size_kb_fops) ||
!trace_create_file("trace_pipe", TRACEFS_MODE_READ, remote_d, remote,
- &trace_pipe_fops))
+ &trace_pipe_fops) ||
+ !trace_create_file("trace", TRACEFS_MODE_WRITE, remote_d, remote,
+ &trace_fops))
goto err;
percpu_d = tracefs_create_dir("per_cpu", remote_d);
@@ -463,7 +501,9 @@ static int trace_remote_init_tracefs(const char *name, struct trace_remote *remo
}
if (!trace_create_cpu_file("trace_pipe", TRACEFS_MODE_READ, cpu_d, remote, cpu,
- &trace_pipe_fops))
+ &trace_pipe_fops) ||
+ !trace_create_cpu_file("trace", TRACEFS_MODE_WRITE, cpu_d, remote, cpu,
+ &trace_fops))
goto err;
}
--
2.51.0.618.g983fd99d29-goog
* [PATCH v7 07/28] tracing: Add non-consuming read to trace remotes
From: Vincent Donnefort @ 2025-10-03 13:38 UTC
To: rostedt, mhiramat, mathieu.desnoyers, linux-trace-kernel, maz,
oliver.upton, joey.gouly, suzuki.poulose, yuzenghui
Cc: kvmarm, linux-arm-kernel, jstultz, qperret, will, aneesh.kumar,
kernel-team, linux-kernel, Vincent Donnefort
Allow reading the "trace" file for trace remotes. This performs a
non-consuming read of the trace buffer.
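A non-consuming open boils down to creating per-CPU ring-buffer
iterators and peeking through them. Roughly, with remote->lock held (a
sketch of how a read path drives the TRI_NONCONSUMING helpers added
below):

    iter = trace_remote_iter(remote, cpu, TRI_NONCONSUMING);
    if (!IS_ERR_OR_NULL(iter)) {
        trace_remote_iter_read_start(iter);   /* shared reader locks */
        while (trace_remote_iter_read_event(iter)) {
            trace_remote_iter_print_event(iter);
            trace_remote_iter_move(iter);     /* advance, don't consume */
        }
        trace_remote_iter_read_finished(iter);
        trace_remote_iter_free(iter);
    }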
Signed-off-by: Vincent Donnefort <vdonnefort@google.com>
diff --git a/kernel/trace/trace_remote.c b/kernel/trace/trace_remote.c
index abf83ff75df5..c011df20df95 100644
--- a/kernel/trace/trace_remote.c
+++ b/kernel/trace/trace_remote.c
@@ -18,14 +18,25 @@
#define TRACEFS_MODE_WRITE 0640
#define TRACEFS_MODE_READ 0440
+enum tri_type {
+ TRI_CONSUMING,
+ TRI_NONCONSUMING,
+};
+
struct trace_remote_iterator {
struct trace_remote *remote;
struct trace_seq seq;
struct delayed_work poll_work;
unsigned long lost_events;
u64 ts;
+ union {
+ struct ring_buffer_iter **rb_iters;
+ struct ring_buffer_iter *rb_iter;
+ };
int cpu;
int evt_cpu;
+ loff_t pos;
+ enum tri_type type;
};
struct trace_remote {
@@ -36,6 +47,8 @@ struct trace_remote {
unsigned long trace_buffer_size;
struct ring_buffer_remote rb_remote;
struct mutex lock;
+ struct rw_semaphore reader_lock;
+ struct rw_semaphore *pcpu_reader_locks;
unsigned int nr_readers;
unsigned int poll_ms;
bool tracing_on;
@@ -225,6 +238,20 @@ static int trace_remote_get(struct trace_remote *remote, int cpu)
if (ret)
return ret;
+ if (cpu != RING_BUFFER_ALL_CPUS && !remote->pcpu_reader_locks) {
+ int lock_cpu;
+
+ remote->pcpu_reader_locks = kcalloc(nr_cpu_ids, sizeof(*remote->pcpu_reader_locks),
+ GFP_KERNEL);
+ if (!remote->pcpu_reader_locks) {
+ trace_remote_try_unload(remote);
+ return -ENOMEM;
+ }
+
+ for_each_possible_cpu(lock_cpu)
+ init_rwsem(&remote->pcpu_reader_locks[lock_cpu]);
+ }
+
remote->nr_readers++;
return 0;
@@ -239,6 +266,9 @@ static void trace_remote_put(struct trace_remote *remote)
if (remote->nr_readers)
return;
+ kfree(remote->pcpu_reader_locks);
+ remote->pcpu_reader_locks = NULL;
+
trace_remote_try_unload(remote);
}
@@ -253,6 +283,48 @@ static void __poll_remote(struct work_struct *work)
msecs_to_jiffies(iter->remote->poll_ms));
}
+static int __alloc_ring_buffer_iter(struct trace_remote_iterator *iter, int cpu)
+{
+ bool once = false;
+
+ if (cpu != RING_BUFFER_ALL_CPUS) {
+ iter->rb_iter = ring_buffer_read_start(iter->remote->trace_buffer, cpu, GFP_KERNEL);
+
+ return iter->rb_iter ? 0 : -ENOMEM;
+ }
+
+ iter->rb_iters = kcalloc(nr_cpu_ids, sizeof(*iter->rb_iters), GFP_KERNEL);
+ if (!iter->rb_iters)
+ return -ENOMEM;
+
+ for_each_possible_cpu(cpu) {
+ iter->rb_iters[cpu] = ring_buffer_read_start(iter->remote->trace_buffer, cpu,
+ GFP_KERNEL);
+ if (iter->rb_iters[cpu])
+ once = true;
+ }
+
+ return once ? 0 : -ENOMEM;
+}
+
+static void __free_ring_buffer_iter(struct trace_remote_iterator *iter, int cpu)
+{
+ if (!iter->rb_iter)
+ return;
+
+ if (cpu != RING_BUFFER_ALL_CPUS) {
+ ring_buffer_read_finish(iter->rb_iter);
+ return;
+ }
+
+ for_each_possible_cpu(cpu) {
+ if (iter->rb_iters[cpu])
+ ring_buffer_read_finish(iter->rb_iters[cpu]);
+ }
+
+ kfree(iter->rb_iters);
+}
+
static struct trace_remote_iterator
-*trace_remote_iter(struct trace_remote *remote, int cpu)
+*trace_remote_iter(struct trace_remote *remote, int cpu, enum tri_type type)
{
@@ -261,6 +333,8 @@ static struct trace_remote_iterator
lockdep_assert_held(&remote->lock);
+ if (type == TRI_NONCONSUMING && !trace_remote_loaded(remote))
+ return NULL;
ret = trace_remote_get(remote, cpu);
if (ret)
@@ -275,9 +349,21 @@ static struct trace_remote_iterator
if (iter) {
iter->remote = remote;
iter->cpu = cpu;
+ iter->type = type;
trace_seq_init(&iter->seq);
- INIT_DELAYED_WORK(&iter->poll_work, __poll_remote);
- schedule_delayed_work(&iter->poll_work, msecs_to_jiffies(remote->poll_ms));
+
+ switch (type) {
+ case TRI_CONSUMING:
+ INIT_DELAYED_WORK(&iter->poll_work, __poll_remote);
+ schedule_delayed_work(&iter->poll_work, msecs_to_jiffies(remote->poll_ms));
+ break;
+ case TRI_NONCONSUMING:
+ ret = __alloc_ring_buffer_iter(iter, cpu);
+ break;
+ }
+
+ if (ret)
+ goto err;
return iter;
}
@@ -301,10 +387,100 @@ static void trace_remote_iter_free(struct trace_remote_iterator *iter)
lockdep_assert_held(&remote->lock);
+ switch (iter->type) {
+ case TRI_CONSUMING:
+ cancel_delayed_work_sync(&iter->poll_work);
+ break;
+ case TRI_NONCONSUMING:
+ __free_ring_buffer_iter(iter, iter->cpu);
+ break;
+ }
+
kfree(iter);
trace_remote_put(remote);
}
+static void trace_remote_iter_read_start(struct trace_remote_iterator *iter)
+{
+ struct trace_remote *remote = iter->remote;
+ int cpu = iter->cpu;
+
+ /* Acquire global reader lock */
+ if (cpu == RING_BUFFER_ALL_CPUS && iter->type == TRI_CONSUMING)
+ down_write(&remote->reader_lock);
+ else
+ down_read(&remote->reader_lock);
+
+ if (cpu == RING_BUFFER_ALL_CPUS)
+ return;
+
+ /*
+ * No need for the remote lock here, iter holds a reference on
+ * remote->nr_readers
+ */
+
+ /* Get the per-CPU one */
+ if (WARN_ON_ONCE(!remote->pcpu_reader_locks))
+ return;
+
+ if (iter->type == TRI_CONSUMING)
+ down_write(&remote->pcpu_reader_locks[cpu]);
+ else
+ down_read(&remote->pcpu_reader_locks[cpu]);
+}
+
+static void trace_remote_iter_read_finished(struct trace_remote_iterator *iter)
+{
+ struct trace_remote *remote = iter->remote;
+ int cpu = iter->cpu;
+
+ /* Release per-CPU reader lock */
+ if (cpu != RING_BUFFER_ALL_CPUS) {
+ /*
+ * No need for the remote lock here, iter holds a reference on
+ * remote->nr_readers
+ */
+ if (iter->type == TRI_CONSUMING)
+ up_write(&remote->pcpu_reader_locks[cpu]);
+ else
+ up_read(&remote->pcpu_reader_locks[cpu]);
+ }
+
+ /* Release global reader lock */
+ if (cpu == RING_BUFFER_ALL_CPUS && iter->type == TRI_CONSUMING)
+ up_write(&remote->reader_lock);
+ else
+ up_read(&remote->reader_lock);
+}
+
+static struct ring_buffer_iter *__get_rb_iter(struct trace_remote_iterator *iter, int cpu)
+{
+ return iter->cpu != RING_BUFFER_ALL_CPUS ? iter->rb_iter : iter->rb_iters[cpu];
+}
+
+static struct ring_buffer_event *
+__peek_event(struct trace_remote_iterator *iter, int cpu, u64 *ts, unsigned long *lost_events)
+{
+ struct ring_buffer_event *rb_evt;
+ struct ring_buffer_iter *rb_iter;
+
+ switch (iter->type) {
+ case TRI_CONSUMING:
+ return ring_buffer_peek(iter->remote->trace_buffer, cpu, ts, lost_events);
+ case TRI_NONCONSUMING:
+ rb_iter = __get_rb_iter(iter, cpu);
+ rb_evt = ring_buffer_iter_peek(rb_iter, ts);
+ if (!rb_evt)
+ return NULL;
+
+ *lost_events = ring_buffer_iter_dropped(rb_iter);
+
+ return rb_evt;
+ }
+
+ return NULL;
+}
+
static bool trace_remote_iter_read_event(struct trace_remote_iterator *iter)
{
struct trace_buffer *trace_buffer = iter->remote->trace_buffer;
@@ -314,7 +490,7 @@ static bool trace_remote_iter_read_event(struct trace_remote_iterator *iter)
if (ring_buffer_empty_cpu(trace_buffer, cpu))
return false;
- if (!ring_buffer_peek(trace_buffer, cpu, &iter->ts, &iter->lost_events))
+ if (!__peek_event(iter, cpu, &iter->ts, &iter->lost_events))
return false;
iter->evt_cpu = cpu;
@@ -329,7 +505,7 @@ static bool trace_remote_iter_read_event(struct trace_remote_iterator *iter)
if (ring_buffer_empty_cpu(trace_buffer, cpu))
continue;
- if (!ring_buffer_peek(trace_buffer, cpu, &ts, &lost_events))
+ if (!__peek_event(iter, cpu, &ts, &lost_events))
continue;
if (ts >= iter->ts)
@@ -343,7 +519,21 @@ static bool trace_remote_iter_read_event(struct trace_remote_iterator *iter)
return iter->ts != U64_MAX;
}
-static int trace_remote_iter_print(struct trace_remote_iterator *iter)
+static void trace_remote_iter_move(struct trace_remote_iterator *iter)
+{
+ struct trace_buffer *trace_buffer = iter->remote->trace_buffer;
+
+ switch (iter->type) {
+ case TRI_CONSUMING:
+ ring_buffer_consume(trace_buffer, iter->evt_cpu, NULL, NULL);
+ break;
+ case TRI_NONCONSUMING:
+ ring_buffer_iter_advance(__get_rb_iter(iter, iter->evt_cpu));
+ break;
+ }
+}
+
+static int trace_remote_iter_print_event(struct trace_remote_iterator *iter)
{
unsigned long usecs_rem;
u64 ts = iter->ts;
@@ -371,7 +561,11 @@ static int trace_pipe_open(struct inode *inode, struct file *filp)
cpu = (long)inode->i_cdev - 1;
guard(mutex)(&remote->lock);
- iter = trace_remote_iter(remote, cpu);
+
+ iter = trace_remote_iter(remote, cpu, TRI_CONSUMING);
+ if (IS_ERR(iter))
+ return PTR_ERR(iter);
+
filp->private_data = iter;
return IS_ERR(iter) ? PTR_ERR(iter) : 0;
@@ -406,7 +600,8 @@ static ssize_t trace_pipe_read(struct file *filp, char __user *ubuf, size_t cnt,
if (ret < 0)
return ret;
- while (trace_remote_iter_next(iter)) {
+ trace_remote_iter_read_start(iter);
+
while (trace_remote_iter_read_event(iter)) {
int prev_len = iter->seq.seq.len;
@@ -415,9 +610,11 @@ static ssize_t trace_pipe_read(struct file *filp, char __user *ubuf, size_t cnt,
break;
}
- ring_buffer_consume(trace_buffer, iter->evt_cpu, NULL, NULL);
+ trace_remote_iter_move(iter);
}
+ trace_remote_iter_read_finished(iter);
+
goto copy_to_user;
}
@@ -427,6 +624,119 @@ static const struct file_operations trace_pipe_fops = {
.release = trace_pipe_release,
};
+static void *trace_seq_start(struct seq_file *m, loff_t *pos)
+{
+ struct trace_remote_iterator *iter = m->private;
+	if (!iter)
+		return NULL;
+
+	while (iter->pos <= *pos) {
+		if (!trace_remote_iter_read_event(iter))
+			return NULL;
+
+		trace_remote_iter_move(iter);
+		iter->pos++;
+	}
+
+ return iter;
+}
+
+static void *trace_seq_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ struct trace_remote_iterator *iter = m->private;
+
+ ++*pos;
+
+ if (!iter || !trace_remote_iter_read_event(iter))
+ return NULL;
+
+ trace_remote_iter_move(iter);
+ iter->pos++;
+
+ return iter;
+}
+
+static int trace_seq_show(struct seq_file *m, void *v)
+{
+ struct trace_remote_iterator *iter = v;
+
+ trace_seq_init(&iter->seq);
+
+ if (trace_remote_iter_print_event(iter)) {
+ seq_printf(m, "[EVENT %d PRINT TOO BIG]\n", iter->evt->id);
+ return 0;
+ }
+
+ return trace_print_seq(m, &iter->seq);
+}
+
+static void trace_seq_stop(struct seq_file *s, void *v) { }
+
+static const struct seq_operations trace_seq_ops = {
+ .start = trace_seq_start,
+ .next = trace_seq_next,
+ .show = trace_seq_show,
+ .stop = trace_seq_stop,
+};
+
+static int trace_open(struct inode *inode, struct file *filp)
+{
+ struct trace_remote *remote = inode->i_private;
+ struct trace_remote_iterator *iter = NULL;
+ int cpu = RING_BUFFER_ALL_CPUS;
+ int ret;
+
+ if (!(filp->f_mode & FMODE_READ))
+ return 0;
+
+ if (inode->i_cdev)
+ cpu = (long)inode->i_cdev - 1;
+
+ guard(mutex)(&remote->lock);
+
+ iter = trace_remote_iter(remote, cpu, TRI_NONCONSUMING);
+ if (IS_ERR(iter))
+ return PTR_ERR(iter);
+
+ ret = seq_open(filp, &trace_seq_ops);
+ if (ret) {
+ trace_remote_iter_free(iter);
+ return ret;
+ }
+
+ if (iter)
+ trace_remote_iter_read_start(iter);
+
+ ((struct seq_file *)filp->private_data)->private = (void *)iter;
+
+ return 0;
+}
+
+static int trace_release(struct inode *inode, struct file *filp)
+{
+ struct trace_remote_iterator *iter;
+
+ if (!(filp->f_mode & FMODE_READ))
+ return 0;
+
+ iter = ((struct seq_file *)filp->private_data)->private;
+ seq_release(inode, filp);
+
+ if (!iter)
+ return 0;
+
+ guard(mutex)(&iter->remote->lock);
+
+ trace_remote_iter_read_finished(iter);
+ trace_remote_iter_free(iter);
+
+ return 0;
+}
+
static ssize_t trace_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
{
struct inode *inode = file_inode(filp);
@@ -444,7 +754,11 @@ static ssize_t trace_write(struct file *filp, const char __user *ubuf, size_t cn
}
static const struct file_operations trace_fops = {
+ .open = trace_open,
.write = trace_write,
+ .read = seq_read,
+ .read_iter = seq_read_iter,
+ .release = trace_release,
};
static int trace_remote_init_tracefs(const char *name, struct trace_remote *remote)
@@ -533,6 +847,7 @@ int trace_remote_register(const char *name, struct trace_remote_callbacks *cbs,
remote->trace_buffer_size = 7 << 10;
remote->poll_ms = 100;
mutex_init(&remote->lock);
+ init_rwsem(&remote->reader_lock);
if (trace_remote_init_tracefs(name, remote)) {
kfree(remote);
--
2.51.0.618.g983fd99d29-goog
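A note on the locking introduced above: the global and per-CPU reader
rwsems taken by trace_remote_iter_read_start() combine as follows (a
summary sketch using the names from this patch):

/*
 * Reader locking summary:
 *
 *   iterator scope   TRI_CONSUMING              TRI_NONCONSUMING
 *   --------------   -------------              ----------------
 *   ALL_CPUS         down_write(reader_lock)    down_read(reader_lock)
 *   single CPU       down_read(reader_lock)     down_read(reader_lock)
 *                    down_write(pcpu[cpu])      down_read(pcpu[cpu])
 *
 * A consuming reader modifies the ring-buffer state, so it takes the
 * write side of the lock covering its scope. An ALL_CPUS consuming
 * reader takes the global lock in write mode, excluding every other
 * reader at once, while per-CPU readers only contend on their own CPU.
 */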
^ permalink raw reply related [flat|nested] 33+ messages in thread* [PATCH v7 08/28] tracing: Add init callback to trace remotes
2025-10-03 13:37 [PATCH v7 00/28] Tracefs support for pKVM Vincent Donnefort
` (6 preceding siblings ...)
2025-10-03 13:38 ` [PATCH v7 07/28] tracing: Add non-consuming read " Vincent Donnefort
@ 2025-10-03 13:38 ` Vincent Donnefort
2025-10-03 13:38 ` [PATCH v7 09/28] tracing: Add events " Vincent Donnefort
` (19 subsequent siblings)
27 siblings, 0 replies; 33+ messages in thread
From: Vincent Donnefort @ 2025-10-03 13:38 UTC (permalink / raw)
To: rostedt, mhiramat, mathieu.desnoyers, linux-trace-kernel, maz,
oliver.upton, joey.gouly, suzuki.poulose, yuzenghui
Cc: kvmarm, linux-arm-kernel, jstultz, qperret, will, aneesh.kumar,
kernel-team, linux-kernel, Vincent Donnefort
Add a .init callback so trace remote callers can add entries to the
tracefs directory.
Signed-off-by: Vincent Donnefort <vdonnefort@google.com>
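For illustration, a remote could use the new callback like this (a
minimal sketch; the my_* names are made up):

static int my_remote_init(struct dentry *d, void *priv)
{
	/* Add a custom file under the remote's tracefs directory */
	if (!tracefs_create_file("my_stats", 0444, d, priv, &my_stats_fops))
		return -ENOMEM;

	return 0;
}

static struct trace_remote_callbacks my_cbs = {
	.init			= my_remote_init,
	.load_trace_buffer	= my_load_trace_buffer,
	.unload_trace_buffer	= my_unload_trace_buffer,
	.enable_tracing		= my_enable_tracing,
};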
diff --git a/include/linux/trace_remote.h b/include/linux/trace_remote.h
index 55754ef78104..15a579633123 100644
--- a/include/linux/trace_remote.h
+++ b/include/linux/trace_remote.h
@@ -3,11 +3,14 @@
#ifndef _LINUX_TRACE_REMOTE_H
#define _LINUX_TRACE_REMOTE_H
+#include <linux/dcache.h>
#include <linux/ring_buffer.h>
/**
* struct trace_remote_callbacks - Callbacks used by Tracefs to control the remote
*
+ * @init: Called once the remote has been registered. Allows the
+ * caller to extend the Tracefs remote directory
* @load_trace_buffer: Called before Tracefs accesses the trace buffer for the first
* time. Must return a &trace_buffer_desc
* (most likely filled with trace_remote_alloc_buffer())
@@ -23,6 +26,7 @@
* new reader-page from the @cpu ring-buffer.
*/
struct trace_remote_callbacks {
+ int (*init)(struct dentry *d, void *priv);
struct trace_buffer_desc *(*load_trace_buffer)(unsigned long size, void *priv);
void (*unload_trace_buffer)(struct trace_buffer_desc *desc, void *priv);
int (*enable_tracing)(bool enable, void *priv);
diff --git a/kernel/trace/trace_remote.c b/kernel/trace/trace_remote.c
index c011df20df95..55ba3fd9e002 100644
--- a/kernel/trace/trace_remote.c
+++ b/kernel/trace/trace_remote.c
@@ -837,6 +837,7 @@ static int trace_remote_init_tracefs(const char *name, struct trace_remote *remo
int trace_remote_register(const char *name, struct trace_remote_callbacks *cbs, void *priv)
{
struct trace_remote *remote;
+ int ret;
remote = kzalloc(sizeof(*remote), GFP_KERNEL);
if (!remote)
@@ -854,7 +855,11 @@ int trace_remote_register(const char *name, struct trace_remote_callbacks *cbs,
return -ENOMEM;
}
- return 0;
+ ret = cbs->init ? cbs->init(remote->dentry, priv) : 0;
+ if (ret)
+ pr_err("Init failed for trace remote '%s' (%d)\n", name, ret);
+
+ return ret;
}
void trace_remote_free_buffer(struct trace_buffer_desc *desc)
--
2.51.0.618.g983fd99d29-goog
^ permalink raw reply related [flat|nested] 33+ messages in thread* [PATCH v7 09/28] tracing: Add events to trace remotes
2025-10-03 13:37 [PATCH v7 00/28] Tracefs support for pKVM Vincent Donnefort
` (7 preceding siblings ...)
2025-10-03 13:38 ` [PATCH v7 08/28] tracing: Add init callback " Vincent Donnefort
@ 2025-10-03 13:38 ` Vincent Donnefort
2025-10-03 13:38 ` [PATCH v7 10/28] tracing: Add events/ root files " Vincent Donnefort
` (18 subsequent siblings)
27 siblings, 0 replies; 33+ messages in thread
From: Vincent Donnefort @ 2025-10-03 13:38 UTC (permalink / raw)
To: rostedt, mhiramat, mathieu.desnoyers, linux-trace-kernel, maz,
oliver.upton, joey.gouly, suzuki.poulose, yuzenghui
Cc: kvmarm, linux-arm-kernel, jstultz, qperret, will, aneesh.kumar,
kernel-team, linux-kernel, Vincent Donnefort
An event is a predefined point in the writer code that allows logging
data. Following the same scheme as kernel events, add remote events,
described to user-space within the events/ tracefs directory found in
the corresponding trace remote.
Remote events are expected to be described during the trace remote
registration.
Also add a .enable_event callback to trace_remote to toggle event
logging, if supported.
Signed-off-by: Vincent Donnefort <vdonnefort@google.com>
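As an illustration, a remote would describe and register its events
along these lines (a sketch; the my_* fields and print helpers are
hypothetical and typically generated, see the helper macros later in
this series):

static struct remote_event my_events[] = {
	{
		.name		= "foo",
		.id		= 1,
		.fields		= my_foo_fields,
		.print_fmt	= "\"count=%u\", REC->count",
		.print		= my_foo_print,
	},
	{
		.name		= "bar",
		.id		= 2,	/* IDs must be sorted in ascending order */
		.fields		= my_bar_fields,
		.print_fmt	= "\"addr=0x%lx\", REC->addr",
		.print		= my_bar_print,
	},
};

ret = trace_remote_register("my_remote", &my_cbs, NULL,
			    my_events, ARRAY_SIZE(my_events));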
diff --git a/include/linux/trace_remote.h b/include/linux/trace_remote.h
index 15a579633123..456df837fc6b 100644
--- a/include/linux/trace_remote.h
+++ b/include/linux/trace_remote.h
@@ -5,6 +5,7 @@
#include <linux/dcache.h>
#include <linux/ring_buffer.h>
+#include <linux/trace_remote_event.h>
/**
* struct trace_remote_callbacks - Callbacks used by Tracefs to control the remote
@@ -24,6 +25,8 @@
* @reset: Called on `echo 0 > trace`. It is expected from the
* remote to reset all ring-buffer pages.
* new reader-page from the @cpu ring-buffer.
+ * @enable_event: Called on events/event_name/enable. The remote is
+ * expected to enable or disable the writing of event @id.
*/
struct trace_remote_callbacks {
int (*init)(struct dentry *d, void *priv);
@@ -32,6 +35,7 @@ struct trace_remote_callbacks {
int (*enable_tracing)(bool enable, void *priv);
int (*swap_reader_page)(unsigned int cpu, void *priv);
int (*reset)(unsigned int cpu, void *priv);
+ int (*enable_event)(unsigned short id, bool enable, void *priv);
};
/**
@@ -53,7 +57,8 @@ struct trace_remote_callbacks {
*
* Return: 0 on success, negative error code on failure.
*/
-int trace_remote_register(const char *name, struct trace_remote_callbacks *cbs, void *priv);
+int trace_remote_register(const char *name, struct trace_remote_callbacks *cbs, void *priv,
+ struct remote_event *events, size_t nr_events);
/**
* trace_remote_alloc_buffer() - Dynamically allocate a trace buffer
diff --git a/include/linux/trace_remote_event.h b/include/linux/trace_remote_event.h
new file mode 100644
index 000000000000..a4449008a075
--- /dev/null
+++ b/include/linux/trace_remote_event.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _LINUX_TRACE_REMOTE_EVENTS_H
+#define _LINUX_TRACE_REMOTE_EVENTS_H
+
+struct trace_remote;
+struct trace_event_fields;
+
+struct remote_event_hdr {
+ unsigned short id;
+};
+
+#define REMOTE_EVENT_NAME_MAX 30
+struct remote_event {
+ char name[REMOTE_EVENT_NAME_MAX];
+ unsigned short id;
+ bool enabled;
+ struct trace_remote *remote;
+ struct trace_event_fields *fields;
+ char *print_fmt;
+ void (*print)(void *evt, struct trace_seq *seq);
+};
+#endif
diff --git a/kernel/trace/trace_remote.c b/kernel/trace/trace_remote.c
index 55ba3fd9e002..9504d9c04bef 100644
--- a/kernel/trace/trace_remote.c
+++ b/kernel/trace/trace_remote.c
@@ -33,6 +33,7 @@ struct trace_remote_iterator {
struct ring_buffer_iter **rb_iters;
struct ring_buffer_iter *rb_iter;
};
+ struct remote_event_hdr *evt;
int cpu;
int evt_cpu;
loff_t pos;
@@ -44,6 +45,10 @@ struct trace_remote {
void *priv;
struct trace_buffer *trace_buffer;
struct trace_buffer_desc *trace_buffer_desc;
+ struct dentry *dentry;
+ struct eventfs_inode *eventfs;
+ struct remote_event *events;
+ unsigned long nr_events;
unsigned long trace_buffer_size;
struct ring_buffer_remote rb_remote;
struct mutex lock;
@@ -168,7 +173,8 @@ static void trace_remote_reset(struct trace_remote *remote, int cpu)
static ssize_t
tracing_on_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
{
- struct trace_remote *remote = filp->private_data;
+ struct seq_file *seq = filp->private_data;
+ struct trace_remote *remote = seq->private;
unsigned long val;
int ret;
@@ -197,7 +203,8 @@ DEFINE_SHOW_STORE_ATTRIBUTE(tracing_on);
static ssize_t buffer_size_kb_write(struct file *filp, const char __user *ubuf, size_t cnt,
loff_t *ppos)
{
- struct trace_remote *remote = filp->private_data;
+ struct seq_file *seq = filp->private_data;
+ struct trace_remote *remote = seq->private;
unsigned long val;
int ret;
@@ -484,16 +491,19 @@ __peek_event(struct trace_remote_iterator *iter, int cpu, u64 *ts, unsigned long
static bool trace_remote_iter_read_event(struct trace_remote_iterator *iter)
{
struct trace_buffer *trace_buffer = iter->remote->trace_buffer;
+ struct ring_buffer_event *rb_evt;
int cpu = iter->cpu;
if (cpu != RING_BUFFER_ALL_CPUS) {
if (ring_buffer_empty_cpu(trace_buffer, cpu))
return false;
- if (!__peek_event(iter, cpu, &iter->ts, &iter->lost_events))
+ rb_evt = __peek_event(iter, cpu, &iter->ts, &iter->lost_events);
+ if (!rb_evt)
return false;
iter->evt_cpu = cpu;
+ iter->evt = (struct remote_event_hdr *)ring_buffer_event_data(rb_evt);
return true;
}
@@ -505,7 +515,8 @@ static bool trace_remote_iter_read_event(struct trace_remote_iterator *iter)
if (ring_buffer_empty_cpu(trace_buffer, cpu))
continue;
- if (!__peek_event(iter, cpu, &ts, &lost_events))
+ rb_evt = __peek_event(iter, cpu, &ts, &lost_events);
+ if (!rb_evt)
continue;
if (ts >= iter->ts)
@@ -513,6 +524,7 @@ static bool trace_remote_iter_read_event(struct trace_remote_iterator *iter)
iter->ts = ts;
iter->evt_cpu = cpu;
+ iter->evt = (struct remote_event_hdr *)ring_buffer_event_data(rb_evt);
iter->lost_events = lost_events;
}
@@ -533,8 +545,11 @@ static void trace_remote_iter_move(struct trace_remote_iterator *iter)
}
}
+static struct remote_event *trace_remote_find_event(struct trace_remote *remote, unsigned short id);
+
static int trace_remote_iter_print_event(struct trace_remote_iterator *iter)
{
+ struct remote_event *evt;
unsigned long usecs_rem;
u64 ts = iter->ts;
@@ -548,6 +563,12 @@ static int trace_remote_iter_print_event(struct trace_remote_iterator *iter)
trace_seq_printf(&iter->seq, "[%03d]\t%5llu.%06lu: ", iter->evt_cpu,
ts, usecs_rem);
+ evt = trace_remote_find_event(iter->remote, iter->evt->id);
+ if (!evt)
+ trace_seq_printf(&iter->seq, "UNKNOWN id=%d\n", iter->evt->id);
+ else
+ evt->print(iter->evt, &iter->seq);
+
return trace_seq_has_overflowed(&iter->seq) ? -EOVERFLOW : 0;
}
@@ -821,6 +842,8 @@ static int trace_remote_init_tracefs(const char *name, struct trace_remote *remo
goto err;
}
+ remote->dentry = remote_d;
+
return 0;
err:
@@ -834,7 +857,11 @@ static int trace_remote_init_tracefs(const char *name, struct trace_remote *remo
return -ENOMEM;
}
-int trace_remote_register(const char *name, struct trace_remote_callbacks *cbs, void *priv)
+static int trace_remote_register_events(const char *remote_name, struct trace_remote *remote,
+ struct remote_event *events, size_t nr_events);
+
+int trace_remote_register(const char *name, struct trace_remote_callbacks *cbs, void *priv,
+ struct remote_event *events, size_t nr_events)
{
struct trace_remote *remote;
int ret;
@@ -855,6 +882,13 @@ int trace_remote_register(const char *name, struct trace_remote_callbacks *cbs,
return -ENOMEM;
}
+ ret = trace_remote_register_events(name, remote, events, nr_events);
+ if (ret) {
+ pr_err("Failed to register events for trace remote '%s' (%d)\n",
+ name, ret);
+ return ret;
+ }
+
ret = cbs->init ? cbs->init(remote->dentry, priv) : 0;
if (ret)
pr_err("Init failed for trace remote '%s' (%d)\n", name, ret);
@@ -926,3 +960,220 @@ int trace_remote_alloc_buffer(struct trace_buffer_desc *desc, size_t desc_size,
trace_remote_free_buffer(desc);
return ret;
}
+
+static int
+trace_remote_enable_event(struct trace_remote *remote, struct remote_event *evt, bool enable)
+{
+ int ret;
+
+ lockdep_assert_held(&remote->lock);
+
+ if (evt->enabled == enable)
+ return 0;
+
+ ret = remote->cbs->enable_event(evt->id, enable, remote->priv);
+ if (ret)
+ return ret;
+
+ evt->enabled = enable;
+
+ return 0;
+}
+
+static int remote_event_enable_show(struct seq_file *s, void *unused)
+{
+ struct remote_event *evt = s->private;
+
+ seq_printf(s, "%d\n", evt->enabled);
+
+ return 0;
+}
+
+static ssize_t remote_event_enable_write(struct file *filp, const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *seq = filp->private_data;
+ struct remote_event *evt = seq->private;
+ struct trace_remote *remote = evt->remote;
+ u8 enable;
+ int ret;
+
+ ret = kstrtou8_from_user(ubuf, count, 10, &enable);
+ if (ret)
+ return ret;
+
+ guard(mutex)(&remote->lock);
+
+ ret = trace_remote_enable_event(remote, evt, enable);
+ if (ret)
+ return ret;
+
+ return count;
+}
+DEFINE_SHOW_STORE_ATTRIBUTE(remote_event_enable);
+
+static int remote_event_id_show(struct seq_file *s, void *unused)
+{
+ struct remote_event *evt = s->private;
+
+ seq_printf(s, "%d\n", evt->id);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(remote_event_id);
+
+static int remote_event_format_show(struct seq_file *s, void *unused)
+{
+ size_t offset = sizeof(struct remote_event_hdr);
+ struct remote_event *evt = s->private;
+ struct trace_event_fields *field;
+
+ seq_printf(s, "name: %s\n", evt->name);
+ seq_printf(s, "ID: %d\n", evt->id);
+ seq_puts(s,
+ "format:\n\tfield:unsigned short common_type;\toffset:0;\tsize:2;\tsigned:0;\n\n");
+
+ field = &evt->fields[0];
+ while (field->name) {
+ seq_printf(s, "\tfield:%s %s;\toffset:%zu;\tsize:%u;\tsigned:%d;\n",
+ field->type, field->name, offset, field->size,
+ !!field->is_signed);
+ offset += field->size;
+ field++;
+ }
+
+ if (field != &evt->fields[0])
+ seq_puts(s, "\n");
+
+ seq_printf(s, "print fmt: %s\n", evt->print_fmt);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(remote_event_format);
+
+static int remote_event_callback(const char *name, umode_t *mode, void **data,
+ const struct file_operations **fops)
+{
+ if (!strcmp(name, "enable")) {
+ *mode = TRACEFS_MODE_WRITE;
+ *fops = &remote_event_enable_fops;
+ return 1;
+ }
+
+ if (!strcmp(name, "id")) {
+ *mode = TRACEFS_MODE_READ;
+ *fops = &remote_event_id_fops;
+ return 1;
+ }
+
+ if (!strcmp(name, "format")) {
+ *mode = TRACEFS_MODE_READ;
+ *fops = &remote_event_format_fops;
+ return 1;
+ }
+
+ return 0;
+}
+
+static int trace_remote_init_eventfs(const char *remote_name, struct trace_remote *remote,
+ struct remote_event *evt)
+{
+ struct eventfs_inode *eventfs = remote->eventfs;
+ static struct eventfs_entry entries[] = {
+ {
+ .name = "enable",
+ .callback = remote_event_callback,
+ }, {
+ .name = "id",
+ .callback = remote_event_callback,
+ }, {
+ .name = "format",
+ .callback = remote_event_callback,
+ }
+ };
+ bool eventfs_create = false;
+
+ if (!eventfs) {
+ eventfs = eventfs_create_events_dir("events", remote->dentry, NULL, 0, NULL);
+ if (IS_ERR(eventfs))
+ return PTR_ERR(eventfs);
+
+ /*
+ * Create a hierarchy similar to the local events, even if only a single
+ * system is supported at the moment
+ */
+ eventfs = eventfs_create_dir(remote_name, eventfs, NULL, 0, NULL);
+ if (IS_ERR(eventfs))
+ return PTR_ERR(eventfs);
+
+ remote->eventfs = eventfs;
+ eventfs_create = true;
+ }
+
+ eventfs = eventfs_create_dir(evt->name, eventfs, entries, ARRAY_SIZE(entries), evt);
+ if (IS_ERR(eventfs)) {
+ if (eventfs_create) {
+ eventfs_remove_events_dir(remote->eventfs);
+ remote->eventfs = NULL;
+ }
+ return PTR_ERR(eventfs);
+ }
+
+ return 0;
+}
+
+static int trace_remote_attach_events(struct trace_remote *remote, struct remote_event *events,
+ size_t nr_events)
+{
+ int i;
+
+ for (i = 0; i < nr_events; i++) {
+ struct remote_event *evt = &events[i];
+
+ if (evt->remote)
+ return -EEXIST;
+
+ evt->remote = remote;
+
+ /* We need events to be sorted for efficient lookup */
+ if (i && evt->id <= events[i - 1].id)
+ return -EINVAL;
+ }
+
+ remote->events = events;
+ remote->nr_events = nr_events;
+
+ return 0;
+}
+
+static int trace_remote_register_events(const char *remote_name, struct trace_remote *remote,
+ struct remote_event *events, size_t nr_events)
+{
+ int i, ret;
+
+ ret = trace_remote_attach_events(remote, events, nr_events);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < nr_events; i++) {
+ struct remote_event *evt = &events[i];
+
+ ret = trace_remote_init_eventfs(remote_name, remote, evt);
+ if (ret)
+ pr_warn("Failed to init eventfs for event '%s' (%d)",
+ evt->name, ret);
+ }
+
+ return 0;
+}
+
+static int __cmp_events(const void *id, const void *evt)
+{
+ return (long)id - ((struct remote_event *)evt)->id;
+}
+
+static struct remote_event *trace_remote_find_event(struct trace_remote *remote, unsigned short id)
+{
+ return bsearch((const void *)(unsigned long)id, remote->events, remote->nr_events,
+ sizeof(*remote->events), __cmp_events);
+}
--
2.51.0.618.g983fd99d29-goog
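For reference, a hypothetical event with a single u64 field would
expose a format file along these lines:

name: foo
ID: 1
format:
	field:unsigned short common_type;	offset:0;	size:2;	signed:0;

	field:u64 count;	offset:2;	size:8;	signed:0;

print fmt: "count=%llu", REC->count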
^ permalink raw reply related [flat|nested] 33+ messages in thread* [PATCH v7 10/28] tracing: Add events/ root files to trace remotes
2025-10-03 13:37 [PATCH v7 00/28] Tracefs support for pKVM Vincent Donnefort
` (8 preceding siblings ...)
2025-10-03 13:38 ` [PATCH v7 09/28] tracing: Add events " Vincent Donnefort
@ 2025-10-03 13:38 ` Vincent Donnefort
2025-10-03 13:38 ` [PATCH v7 11/28] tracing: Add helpers to create trace remote events Vincent Donnefort
` (17 subsequent siblings)
27 siblings, 0 replies; 33+ messages in thread
From: Vincent Donnefort @ 2025-10-03 13:38 UTC (permalink / raw)
To: rostedt, mhiramat, mathieu.desnoyers, linux-trace-kernel, maz,
oliver.upton, joey.gouly, suzuki.poulose, yuzenghui
Cc: kvmarm, linux-arm-kernel, jstultz, qperret, will, aneesh.kumar,
kernel-team, linux-kernel, Vincent Donnefort
Just like for the kernel events directory, add 'enable', 'header_page'
and 'header_event' at the root of the trace remote events/ directory.
Signed-off-by: Vincent Donnefort <vdonnefort@google.com>
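Like its kernel counterpart, the root 'enable' file aggregates the
state of all the remote events:

/*
 * events/enable, on read:
 *   '0'  no event is enabled (or no event is registered)
 *   '1'  all events are enabled
 *   'X'  some events are enabled, some are not
 *
 * Writing '0' or '1' applies to every registered event.
 */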
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index eb556e691b5e..a26a6f537f87 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -604,7 +604,8 @@ int ring_buffer_print_page_header(struct trace_buffer *buffer, struct trace_seq
trace_seq_printf(s, "\tfield: char data;\t"
"offset:%u;\tsize:%u;\tsigned:%u;\n",
(unsigned int)offsetof(typeof(field), data),
- (unsigned int)buffer->subbuf_size,
+ (unsigned int)(buffer ? buffer->subbuf_size :
+ PAGE_SIZE - BUF_PAGE_HDR_SIZE),
(unsigned int)is_signed_type(char));
return !trace_seq_has_overflowed(s);
diff --git a/kernel/trace/trace_remote.c b/kernel/trace/trace_remote.c
index 9504d9c04bef..1bc9c9a5197f 100644
--- a/kernel/trace/trace_remote.c
+++ b/kernel/trace/trace_remote.c
@@ -1075,10 +1075,145 @@ static int remote_event_callback(const char *name, umode_t *mode, void **data,
return 0;
}
+static ssize_t remote_events_dir_enable_write(struct file *filp, const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct trace_remote *remote = file_inode(filp)->i_private;
+ int i, ret;
+ u8 enable;
+
+ ret = kstrtou8_from_user(ubuf, count, 10, &enable);
+ if (ret)
+ return ret;
+
+ guard(mutex)(&remote->lock);
+
+ for (i = 0; i < remote->nr_events; i++) {
+ struct remote_event *evt = &remote->events[i];
+
+ trace_remote_enable_event(remote, evt, enable);
+ }
+
+ return count;
+}
+
+static ssize_t remote_events_dir_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
+ loff_t *ppos)
+{
+ struct trace_remote *remote = file_inode(filp)->i_private;
+ const char enabled_char[] = {'0', '1', 'X'};
+ char enabled_str[] = " \n";
+ int i, enabled = -1;
+
+ guard(mutex)(&remote->lock);
+
+ for (i = 0; i < remote->nr_events; i++) {
+ struct remote_event *evt = &remote->events[i];
+
+ if (enabled == -1) {
+ enabled = evt->enabled;
+ } else if (enabled != evt->enabled) {
+ enabled = 2;
+ break;
+ }
+ }
+
+ enabled_str[0] = enabled_char[enabled == -1 ? 0 : enabled];
+
+ return simple_read_from_buffer(ubuf, cnt, ppos, enabled_str, 2);
+}
+
+static const struct file_operations remote_events_dir_enable_fops = {
+ .write = remote_events_dir_enable_write,
+ .read = remote_events_dir_enable_read,
+};
+
+static ssize_t
+remote_events_dir_header_page_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ struct trace_seq *s;
+ int ret;
+
+ s = kmalloc(sizeof(*s), GFP_KERNEL);
+ if (!s)
+ return -ENOMEM;
+
+ trace_seq_init(s);
+
+ ring_buffer_print_page_header(NULL, s);
+ ret = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, trace_seq_used(s));
+ kfree(s);
+
+ return ret;
+}
+
+static const struct file_operations remote_events_dir_header_page_fops = {
+ .read = remote_events_dir_header_page_read,
+};
+
+static ssize_t
+remote_events_dir_header_event_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ struct trace_seq *s;
+ int ret;
+
+ s = kmalloc(sizeof(*s), GFP_KERNEL);
+ if (!s)
+ return -ENOMEM;
+
+ trace_seq_init(s);
+
+ ring_buffer_print_entry_header(s);
+ ret = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, trace_seq_used(s));
+ kfree(s);
+
+ return ret;
+}
+
+static const struct file_operations remote_events_dir_header_event_fops = {
+ .read = remote_events_dir_header_event_read,
+};
+
+static int remote_events_dir_callback(const char *name, umode_t *mode, void **data,
+ const struct file_operations **fops)
+{
+ if (!strcmp(name, "enable")) {
+ *mode = TRACEFS_MODE_WRITE;
+ *fops = &remote_events_dir_enable_fops;
+ return 1;
+ }
+
+ if (!strcmp(name, "header_page")) {
+ *mode = TRACEFS_MODE_READ;
+ *fops = &remote_events_dir_header_page_fops;
+ return 1;
+ }
+
+ if (!strcmp(name, "header_event")) {
+ *mode = TRACEFS_MODE_READ;
+ *fops = &remote_events_dir_header_event_fops;
+ return 1;
+ }
+
+ return 0;
+}
+
static int trace_remote_init_eventfs(const char *remote_name, struct trace_remote *remote,
struct remote_event *evt)
{
struct eventfs_inode *eventfs = remote->eventfs;
+ static struct eventfs_entry dir_entries[] = {
+ {
+ .name = "enable",
+ .callback = remote_events_dir_callback,
+ }, {
+ .name = "header_page",
+ .callback = remote_events_dir_callback,
+ }, {
+ .name = "header_event",
+ .callback = remote_events_dir_callback,
+ }
+ };
static struct eventfs_entry entries[] = {
{
.name = "enable",
@@ -1094,7 +1229,8 @@ static int trace_remote_init_eventfs(const char *remote_name, struct trace_remot
bool eventfs_create = false;
if (!eventfs) {
- eventfs = eventfs_create_events_dir("events", remote->dentry, NULL, 0, NULL);
+ eventfs = eventfs_create_events_dir("events", remote->dentry, dir_entries,
+ ARRAY_SIZE(dir_entries), remote);
if (IS_ERR(eventfs))
return PTR_ERR(eventfs);
--
2.51.0.618.g983fd99d29-goog
^ permalink raw reply related [flat|nested] 33+ messages in thread* [PATCH v7 11/28] tracing: Add helpers to create trace remote events
2025-10-03 13:37 [PATCH v7 00/28] Tracefs support for pKVM Vincent Donnefort
` (9 preceding siblings ...)
2025-10-03 13:38 ` [PATCH v7 10/28] tracing: Add events/ root files " Vincent Donnefort
@ 2025-10-03 13:38 ` Vincent Donnefort
2025-10-03 13:38 ` [PATCH v7 12/28] ring-buffer: Export buffer_data_page and macros Vincent Donnefort
` (16 subsequent siblings)
27 siblings, 0 replies; 33+ messages in thread
From: Vincent Donnefort @ 2025-10-03 13:38 UTC (permalink / raw)
To: rostedt, mhiramat, mathieu.desnoyers, linux-trace-kernel, maz,
oliver.upton, joey.gouly, suzuki.poulose, yuzenghui
Cc: kvmarm, linux-arm-kernel, jstultz, qperret, will, aneesh.kumar,
kernel-team, linux-kernel, Vincent Donnefort
Declaring remote events can be cumbersome, so let's add a set of macros
to simplify developers' lives. The declaration of a remote event is very
similar to the kernel's events:
REMOTE_EVENT(name, id,
	RE_STRUCT(
		re_field(u64, foo)
	),
	RE_PRINTK("foo=%llu", __entry->foo)
)
Signed-off-by: Vincent Donnefort <vdonnefort@google.com>
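The macros rely on the usual double-include trick: the event header is
expanded a first time to emit the print helpers, then a second time to
emit the struct remote_event definitions. A minimal (hypothetical) user
looks like:

/* my_remote_events.h */
REMOTE_EVENT(selftest, 1,
	RE_STRUCT(
		re_field(u64, id)
	),
	RE_PRINTK("id=%llu", __entry->id)
);

/* my_remote.c */
#define REMOTE_EVENT_INCLUDE_FILE kernel/trace/my_remote_events.h
#include <trace/define_remote_events.h>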
diff --git a/include/linux/trace_remote_event.h b/include/linux/trace_remote_event.h
index a4449008a075..c8ae1e1f5e72 100644
--- a/include/linux/trace_remote_event.h
+++ b/include/linux/trace_remote_event.h
@@ -5,6 +5,7 @@
struct trace_remote;
struct trace_event_fields;
+struct trace_seq;
struct remote_event_hdr {
unsigned short id;
@@ -20,4 +21,13 @@ struct remote_event {
char *print_fmt;
void (*print)(void *evt, struct trace_seq *seq);
};
+
+#define RE_STRUCT(__args...) __args
+#define re_field(__type, __field) __type __field;
+
+#define REMOTE_EVENT_FORMAT(__name, __struct) \
+ struct remote_event_format_##__name { \
+ struct remote_event_hdr hdr; \
+ __struct \
+ }
#endif
diff --git a/include/trace/define_remote_events.h b/include/trace/define_remote_events.h
new file mode 100644
index 000000000000..03c9f5515c5a
--- /dev/null
+++ b/include/trace/define_remote_events.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <linux/trace_events.h>
+#include <linux/trace_remote_event.h>
+#include <linux/trace_seq.h>
+#include <linux/stringify.h>
+
+#define REMOTE_EVENT_INCLUDE(__file) __stringify(../../__file)
+
+#ifdef REMOTE_EVENT_SECTION
+# define __REMOTE_EVENT_SECTION(__name) __used __section(REMOTE_EVENT_SECTION"."#__name)
+#else
+# define __REMOTE_EVENT_SECTION(__name)
+#endif
+
+#define __REMOTE_PRINTK_COUNT_ARGS(_0, _1, _2, _n, __args...) _n
+#define REMOTE_PRINTK_COUNT_ARGS(__args...) __REMOTE_PRINTK_COUNT_ARGS(, ##__args, 2, 1, 0)
+
+#define __remote_printk0() \
+ trace_seq_putc(seq, '\n')
+
+#define __remote_printk1(__fmt) \
+ trace_seq_puts(seq, " " __fmt "\n") \
+
+#define __remote_printk2(__fmt, __args...) \
+do { \
+ trace_seq_putc(seq, ' '); \
+ trace_seq_printf(seq, __fmt, __args); \
+ trace_seq_putc(seq, '\n'); \
+} while (0)
+
+/* Apply the appropriate trace_seq sequence according to the number of arguments */
+#define remote_printk(__args...) \
+ CONCATENATE(__remote_printk, REMOTE_PRINTK_COUNT_ARGS(__args))(__args)
+
+#define RE_PRINTK(__args...) __args
+
+#define REMOTE_EVENT(__name, __id, __struct, __printk) \
+ REMOTE_EVENT_FORMAT(__name, __struct); \
+ static void remote_event_print_##__name(void *evt, struct trace_seq *seq) \
+ { \
+ struct remote_event_format_##__name __maybe_unused *__entry = evt; \
+ trace_seq_puts(seq, #__name); \
+ remote_printk(__printk); \
+ }
+#include REMOTE_EVENT_INCLUDE(REMOTE_EVENT_INCLUDE_FILE)
+
+#undef REMOTE_EVENT
+#undef RE_PRINTK
+#undef re_field
+#define re_field(__type, __field) \
+ { \
+ .type = #__type, .name = #__field, \
+ .size = sizeof(__type), .align = __alignof__(__type), \
+ .is_signed = is_signed_type(__type), \
+ },
+#define __entry REC
+#define RE_PRINTK(__fmt, __args...) "\"" __fmt "\", " __stringify(__args)
+#define REMOTE_EVENT(__name, __id, __struct, __printk) \
+ static struct trace_event_fields remote_event_fields_##__name[] = { \
+ __struct \
+ {} \
+ }; \
+ static char remote_event_print_fmt_##__name[] = __printk; \
+ static struct remote_event __REMOTE_EVENT_SECTION(__name) \
+ remote_event_##__name = { \
+ .name = #__name, \
+ .id = __id, \
+ .fields = remote_event_fields_##__name, \
+ .print_fmt = remote_event_print_fmt_##__name, \
+ .print = remote_event_print_##__name, \
+ }
+#include REMOTE_EVENT_INCLUDE(REMOTE_EVENT_INCLUDE_FILE)
--
2.51.0.618.g983fd99d29-goog
^ permalink raw reply related [flat|nested] 33+ messages in thread* [PATCH v7 12/28] ring-buffer: Export buffer_data_page and macros
2025-10-03 13:37 [PATCH v7 00/28] Tracefs support for pKVM Vincent Donnefort
` (10 preceding siblings ...)
2025-10-03 13:38 ` [PATCH v7 11/28] tracing: Add helpers to create trace remote events Vincent Donnefort
@ 2025-10-03 13:38 ` Vincent Donnefort
2025-10-03 13:38 ` [PATCH v7 13/28] tracing: Introduce simple_ring_buffer Vincent Donnefort
` (15 subsequent siblings)
27 siblings, 0 replies; 33+ messages in thread
From: Vincent Donnefort @ 2025-10-03 13:38 UTC (permalink / raw)
To: rostedt, mhiramat, mathieu.desnoyers, linux-trace-kernel, maz,
oliver.upton, joey.gouly, suzuki.poulose, yuzenghui
Cc: kvmarm, linux-arm-kernel, jstultz, qperret, will, aneesh.kumar,
kernel-team, linux-kernel, Vincent Donnefort
In preparation for allowing the writing of ring-buffer-compliant pages
outside of ring_buffer.c, move buffer_data_page and the
timestamp-encoding macros into the publicly available
ring_buffer_types.h.
Signed-off-by: Vincent Donnefort <vdonnefort@google.com>
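An external writer can then mirror what ring_buffer.c does for large
timestamp deltas, e.g. (a sketch; 'event' and 'write_stamp' are assumed
to be maintained by the writer):

u64 delta = timestamp - write_stamp;

if (test_time_stamp(delta)) {
	/* Doesn't fit in 27 bits: emit a RINGBUF_TYPE_TIME_EXTEND event */
	event->type_len = RINGBUF_TYPE_TIME_EXTEND;
	event->time_delta = delta & TS_MASK;	/* low TS_SHIFT bits */
	event->array[0] = delta >> TS_SHIFT;	/* remaining high bits */
}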
diff --git a/include/linux/ring_buffer_types.h b/include/linux/ring_buffer_types.h
new file mode 100644
index 000000000000..54577021a49d
--- /dev/null
+++ b/include/linux/ring_buffer_types.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_RING_BUFFER_TYPES_H
+#define _LINUX_RING_BUFFER_TYPES_H
+
+#include <asm/local.h>
+
+#define TS_SHIFT 27
+#define TS_MASK ((1ULL << TS_SHIFT) - 1)
+#define TS_DELTA_TEST (~TS_MASK)
+
+/*
+ * We need to fit the time_stamp delta into 27 bits.
+ */
+static inline bool test_time_stamp(u64 delta)
+{
+ return !!(delta & TS_DELTA_TEST);
+}
+
+#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
+
+#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
+#define RB_ALIGNMENT 4U
+#define RB_MAX_SMALL_DATA (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
+#define RB_EVNT_MIN_SIZE 8U /* two 32bit words */
+
+#ifndef CONFIG_HAVE_64BIT_ALIGNED_ACCESS
+# define RB_FORCE_8BYTE_ALIGNMENT 0
+# define RB_ARCH_ALIGNMENT RB_ALIGNMENT
+#else
+# define RB_FORCE_8BYTE_ALIGNMENT 1
+# define RB_ARCH_ALIGNMENT 8U
+#endif
+
+#define RB_ALIGN_DATA __aligned(RB_ARCH_ALIGNMENT)
+
+struct buffer_data_page {
+ u64 time_stamp; /* page time stamp */
+ local_t commit; /* write committed index */
+ unsigned char data[] RB_ALIGN_DATA; /* data of buffer page */
+};
+#endif
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index a26a6f537f87..06aae6a2a308 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -4,6 +4,7 @@
*
* Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
*/
+#include <linux/ring_buffer_types.h>
#include <linux/trace_recursion.h>
#include <linux/trace_events.h>
#include <linux/ring_buffer.h>
@@ -156,23 +157,6 @@ int ring_buffer_print_entry_header(struct trace_seq *s)
/* Used for individual buffers (after the counter) */
#define RB_BUFFER_OFF (1 << 20)
-#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
-
-#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
-#define RB_ALIGNMENT 4U
-#define RB_MAX_SMALL_DATA (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
-#define RB_EVNT_MIN_SIZE 8U /* two 32bit words */
-
-#ifndef CONFIG_HAVE_64BIT_ALIGNED_ACCESS
-# define RB_FORCE_8BYTE_ALIGNMENT 0
-# define RB_ARCH_ALIGNMENT RB_ALIGNMENT
-#else
-# define RB_FORCE_8BYTE_ALIGNMENT 1
-# define RB_ARCH_ALIGNMENT 8U
-#endif
-
-#define RB_ALIGN_DATA __aligned(RB_ARCH_ALIGNMENT)
-
/* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
#define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX
@@ -315,10 +299,6 @@ EXPORT_SYMBOL_GPL(ring_buffer_event_data);
#define for_each_online_buffer_cpu(buffer, cpu) \
for_each_cpu_and(cpu, buffer->cpumask, cpu_online_mask)
-#define TS_SHIFT 27
-#define TS_MASK ((1ULL << TS_SHIFT) - 1)
-#define TS_DELTA_TEST (~TS_MASK)
-
static u64 rb_event_time_stamp(struct ring_buffer_event *event)
{
u64 ts;
@@ -337,12 +317,6 @@ static u64 rb_event_time_stamp(struct ring_buffer_event *event)
#define RB_MISSED_MASK (3 << 30)
-struct buffer_data_page {
- u64 time_stamp; /* page time stamp */
- local_t commit; /* write committed index */
- unsigned char data[] RB_ALIGN_DATA; /* data of buffer page */
-};
-
struct buffer_data_read_page {
unsigned order; /* order of the page */
struct buffer_data_page *data; /* actual data, stored in this page */
@@ -401,14 +375,6 @@ static void free_buffer_page(struct buffer_page *bpage)
kfree(bpage);
}
-/*
- * We need to fit the time_stamp delta into 27 bits.
- */
-static inline bool test_time_stamp(u64 delta)
-{
- return !!(delta & TS_DELTA_TEST);
-}
-
struct rb_irq_work {
struct irq_work work;
wait_queue_head_t waiters;
--
2.51.0.618.g983fd99d29-goog
^ permalink raw reply related [flat|nested] 33+ messages in thread* [PATCH v7 13/28] tracing: Introduce simple_ring_buffer
2025-10-03 13:37 [PATCH v7 00/28] Tracefs support for pKVM Vincent Donnefort
` (11 preceding siblings ...)
2025-10-03 13:38 ` [PATCH v7 12/28] ring-buffer: Export buffer_data_page and macros Vincent Donnefort
@ 2025-10-03 13:38 ` Vincent Donnefort
2025-10-03 13:38 ` [PATCH v7 14/28] tracing: Add a trace remote module for testing Vincent Donnefort
` (14 subsequent siblings)
27 siblings, 0 replies; 33+ messages in thread
From: Vincent Donnefort @ 2025-10-03 13:38 UTC (permalink / raw)
To: rostedt, mhiramat, mathieu.desnoyers, linux-trace-kernel, maz,
oliver.upton, joey.gouly, suzuki.poulose, yuzenghui
Cc: kvmarm, linux-arm-kernel, jstultz, qperret, will, aneesh.kumar,
kernel-team, linux-kernel, Vincent Donnefort
Add a simple implementation of the kernel ring-buffer. This is intended
to be used later by ring-buffer remotes such as the pKVM hypervisor,
hence the need for a cut-down version (write-only) without any
dependencies.
Signed-off-by: Vincent Donnefort <vdonnefort@google.com>
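A minimal write path for a user of this API looks like the following
sketch (error handling elided; 'desc' and 'timestamp' are provided by
the caller):

struct simple_rb_per_cpu *cpu_buffer;
struct simple_buffer_page *bpages;
u64 *entry;

cpu_buffer = kmalloc(sizeof(*cpu_buffer), GFP_KERNEL);
bpages = kmalloc_array(desc->nr_page_va, sizeof(*bpages), GFP_KERNEL);

if (simple_ring_buffer_init(cpu_buffer, bpages, desc))
	return;

simple_ring_buffer_enable_tracing(cpu_buffer, true);

/* Reserve room for an 8-byte payload, write it, then publish it */
entry = simple_ring_buffer_reserve(cpu_buffer, sizeof(*entry), timestamp);
if (entry) {
	*entry = 0xcafe;
	simple_ring_buffer_commit(cpu_buffer);
}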
diff --git a/include/linux/simple_ring_buffer.h b/include/linux/simple_ring_buffer.h
new file mode 100644
index 000000000000..f324df2f875b
--- /dev/null
+++ b/include/linux/simple_ring_buffer.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_SIMPLE_RING_BUFFER_H
+#define _LINUX_SIMPLE_RING_BUFFER_H
+
+#include <linux/list.h>
+#include <linux/ring_buffer.h>
+#include <linux/ring_buffer_types.h>
+#include <linux/types.h>
+
+/*
+ * Ideally these structs would stay private, but the caller needs to know
+ * the allocation size for simple_ring_buffer_init().
+ */
+struct simple_buffer_page {
+ struct list_head link;
+ struct buffer_data_page *page;
+ u64 entries;
+ u32 write;
+ u32 id;
+};
+
+struct simple_rb_per_cpu {
+ struct simple_buffer_page *tail_page;
+ struct simple_buffer_page *reader_page;
+ struct simple_buffer_page *head_page;
+ struct simple_buffer_page *bpages;
+ struct trace_buffer_meta *meta;
+ u32 nr_pages;
+
+#define SIMPLE_RB_UNAVAILABLE 0
+#define SIMPLE_RB_READY 1
+#define SIMPLE_RB_WRITING 2
+ u32 status;
+
+ u64 last_overrun;
+ u64 write_stamp;
+
+ struct simple_rb_cbs *cbs;
+};
+
+/**
+ * simple_ring_buffer_init - Init @cpu_buffer based on @desc
+ *
+ * @cpu_buffer: A simple_rb_per_cpu buffer to init, allocated by the caller.
+ * @bpages: Array of simple_buffer_pages, with as many elements as @desc->nr_page_va
+ * @desc: A ring_buffer_desc
+ *
+ * Returns: 0 on success or -EINVAL if the content of @desc is invalid
+ */
+int simple_ring_buffer_init(struct simple_rb_per_cpu *cpu_buffer, struct simple_buffer_page *bpages,
+ const struct ring_buffer_desc *desc);
+
+/**
+ * simple_ring_buffer_unload - Prepare @cpu_buffer for deletion
+ *
+ * @cpu_buffer: A simple_rb_per_cpu that will be deleted.
+ */
+void simple_ring_buffer_unload(struct simple_rb_per_cpu *cpu_buffer);
+
+/**
+ * simple_ring_buffer_reserve - Reserve an entry in @cpu_buffer
+ *
+ * @cpu_buffer: A simple_rb_per_cpu
+ * @length: Size of the entry in bytes
+ * @timestamp: Timestamp of the entry
+ *
+ * Returns the address of the entry where to write data or NULL
+ */
+void *simple_ring_buffer_reserve(struct simple_rb_per_cpu *cpu_buffer, unsigned long length,
+ u64 timestamp);
+
+/**
+ * simple_ring_buffer_commit - Commit the entry reserved with simple_ring_buffer_reserve()
+ *
+ * @cpu_buffer: The simple_rb_per_cpu where the entry has been reserved
+ */
+void simple_ring_buffer_commit(struct simple_rb_per_cpu *cpu_buffer);
+
+/**
+ * simple_ring_buffer_enable_tracing - Enable or disable writing to @cpu_buffer
+ *
+ * @cpu_buffer: A simple_rb_per_cpu
+ * @enable: True to enable tracing, False to disable it
+ *
+ * Returns 0 on success or -ENODEV if @cpu_buffer was unloaded
+ */
+int simple_ring_buffer_enable_tracing(struct simple_rb_per_cpu *cpu_buffer, bool enable);
+
+/**
+ * simple_ring_buffer_reset - Reset @cpu_buffer
+ *
+ * @cpu_buffer: A simple_rb_per_cpu
+ *
+ * This will not clear the content of the data, only reset counters and pointers
+ *
+ * Returns 0 on success or -ENODEV if @cpu_buffer was unloaded.
+ */
+int simple_ring_buffer_reset(struct simple_rb_per_cpu *cpu_buffer);
+
+/**
+ * simple_ring_buffer_swap_reader_page - Swap ring-buffer head with the reader
+ *
+ * This function enables consuming reads: it ensures the current head page will not be
+ * overwritten and can be safely read.
+ *
+ * @cpu_buffer: A simple_rb_per_cpu
+ *
+ * Returns 0 on success, -ENODEV if @cpu_buffer was unloaded or -EBUSY if we failed to catch the
+ * head page.
+ */
+int simple_ring_buffer_swap_reader_page(struct simple_rb_per_cpu *cpu_buffer);
+
+#endif
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 99af56d39eaf..918afcc1fcaf 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -1241,4 +1241,7 @@ source "kernel/trace/rv/Kconfig"
config TRACE_REMOTE
bool
+config SIMPLE_RING_BUFFER
+ bool
+
endif # FTRACE
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 6dab341acc46..03d7d80a9436 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -111,4 +111,5 @@ obj-$(CONFIG_TRACEPOINT_BENCHMARK) += trace_benchmark.o
obj-$(CONFIG_RV) += rv/
obj-$(CONFIG_TRACE_REMOTE) += trace_remote.o
+obj-$(CONFIG_SIMPLE_RING_BUFFER) += simple_ring_buffer.o
libftrace-y := ftrace.o
diff --git a/kernel/trace/simple_ring_buffer.c b/kernel/trace/simple_ring_buffer.c
new file mode 100644
index 000000000000..20e3cd6071a2
--- /dev/null
+++ b/kernel/trace/simple_ring_buffer.c
@@ -0,0 +1,408 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2025 - Google LLC
+ * Author: Vincent Donnefort <vdonnefort@google.com>
+ */
+
+#include <linux/atomic.h>
+#include <linux/simple_ring_buffer.h>
+
+#include <asm/barrier.h>
+#include <asm/local.h>
+
+enum simple_rb_link_type {
+ SIMPLE_RB_LINK_NORMAL = 0,
+ SIMPLE_RB_LINK_HEAD = 1,
+ SIMPLE_RB_LINK_HEAD_MOVING
+};
+
+#define SIMPLE_RB_LINK_MASK ~(SIMPLE_RB_LINK_HEAD | SIMPLE_RB_LINK_HEAD_MOVING)
+
+static void simple_bpage_set_head_link(struct simple_buffer_page *bpage)
+{
+ unsigned long link = (unsigned long)bpage->link.next;
+
+ link &= SIMPLE_RB_LINK_MASK;
+ link |= SIMPLE_RB_LINK_HEAD;
+
+ /*
+ * Paired with simple_rb_find_head() to order access between the head
+ * link and overrun. It ensures we always report an up-to-date value
+ * after swapping the reader page.
+ */
+ smp_store_release(&bpage->link.next, (struct list_head *)link);
+}
+
+static bool simple_bpage_unset_head_link(struct simple_buffer_page *bpage,
+ struct simple_buffer_page *dst,
+ enum simple_rb_link_type new_type)
+{
+ unsigned long *link = (unsigned long *)(&bpage->link.next);
+ unsigned long old = (*link & SIMPLE_RB_LINK_MASK) | SIMPLE_RB_LINK_HEAD;
+ unsigned long new = (unsigned long)(&dst->link) | new_type;
+
+ return try_cmpxchg(link, &old, new);
+}
+
+static void simple_bpage_set_normal_link(struct simple_buffer_page *bpage)
+{
+ unsigned long link = (unsigned long)bpage->link.next;
+
+ WRITE_ONCE(bpage->link.next, (struct list_head *)(link & SIMPLE_RB_LINK_MASK));
+}
+
+static struct simple_buffer_page *simple_bpage_from_link(struct list_head *link)
+{
+ unsigned long ptr = (unsigned long)link & SIMPLE_RB_LINK_MASK;
+
+ return container_of((struct list_head *)ptr, struct simple_buffer_page, link);
+}
+
+static struct simple_buffer_page *simple_bpage_next_page(struct simple_buffer_page *bpage)
+{
+ return simple_bpage_from_link(bpage->link.next);
+}
+
+static void simple_bpage_reset(struct simple_buffer_page *bpage)
+{
+ bpage->write = 0;
+ bpage->entries = 0;
+
+ local_set(&bpage->page->commit, 0);
+}
+
+static void simple_bpage_init(struct simple_buffer_page *bpage, unsigned long page)
+{
+ INIT_LIST_HEAD(&bpage->link);
+ bpage->page = (struct buffer_data_page *)page;
+
+ simple_bpage_reset(bpage);
+}
+
+#define simple_rb_meta_inc(__meta, __inc) \
+ WRITE_ONCE((__meta), (__meta) + (__inc))
+
+static bool simple_rb_loaded(struct simple_rb_per_cpu *cpu_buffer)
+{
+ return !!cpu_buffer->bpages;
+}
+
+static int simple_rb_find_head(struct simple_rb_per_cpu *cpu_buffer)
+{
+ int retry = cpu_buffer->nr_pages * 2;
+ struct simple_buffer_page *head;
+
+ head = cpu_buffer->head_page;
+
+ while (retry--) {
+ unsigned long link;
+
+spin:
+ /* See smp_store_release in simple_bpage_set_head_link() */
+ link = (unsigned long)smp_load_acquire(&head->link.prev->next);
+
+ switch (link & ~SIMPLE_RB_LINK_MASK) {
+ /* Found the head */
+ case SIMPLE_RB_LINK_HEAD:
+ cpu_buffer->head_page = head;
+ return 0;
+ /* The writer caught the head, we can spin, that won't be long */
+ case SIMPLE_RB_LINK_HEAD_MOVING:
+ goto spin;
+ }
+
+ head = simple_bpage_next_page(head);
+ }
+
+ return -EBUSY;
+}
+
+int simple_ring_buffer_swap_reader_page(struct simple_rb_per_cpu *cpu_buffer)
+{
+ struct simple_buffer_page *last, *head, *reader;
+ unsigned long overrun;
+ int retry = 8;
+ int ret;
+
+ if (!simple_rb_loaded(cpu_buffer))
+ return -ENODEV;
+
+ reader = cpu_buffer->reader_page;
+
+ do {
+ /* Run after the writer to find the head */
+ ret = simple_rb_find_head(cpu_buffer);
+ if (ret)
+ return ret;
+
+ head = cpu_buffer->head_page;
+
+ /* Connect the reader page around the header page */
+ reader->link.next = head->link.next;
+ reader->link.prev = head->link.prev;
+
+ /* The last page before the head */
+ last = simple_bpage_from_link(head->link.prev);
+
+ /* The reader page points to the new header page */
+ simple_bpage_set_head_link(reader);
+
+ overrun = cpu_buffer->meta->overrun;
+ } while (!simple_bpage_unset_head_link(last, reader, SIMPLE_RB_LINK_NORMAL) && retry--);
+
+ if (retry < 0)
+ return -EINVAL;
+
+ cpu_buffer->head_page = simple_bpage_from_link(reader->link.next);
+ cpu_buffer->head_page->link.prev = &reader->link;
+ cpu_buffer->reader_page = head;
+ cpu_buffer->meta->reader.lost_events = overrun - cpu_buffer->last_overrun;
+ cpu_buffer->meta->reader.id = cpu_buffer->reader_page->id;
+ cpu_buffer->last_overrun = overrun;
+
+ return 0;
+}
+
+static struct simple_buffer_page *simple_rb_move_tail(struct simple_rb_per_cpu *cpu_buffer)
+{
+ struct simple_buffer_page *tail, *new_tail;
+
+ tail = cpu_buffer->tail_page;
+ new_tail = simple_bpage_next_page(tail);
+
+ if (simple_bpage_unset_head_link(tail, new_tail, SIMPLE_RB_LINK_HEAD_MOVING)) {
+ /*
+ * Oh no! we've caught the head. There is none anymore and
+ * swap_reader will spin until we set the new one. Overrun must
+ * be written first, to make sure we report the correct number
+ * of lost events.
+ */
+ simple_rb_meta_inc(cpu_buffer->meta->overrun, new_tail->entries);
+ simple_rb_meta_inc(cpu_buffer->meta->pages_lost, 1);
+
+ simple_bpage_set_head_link(new_tail);
+ simple_bpage_set_normal_link(tail);
+ }
+
+ simple_bpage_reset(new_tail);
+ cpu_buffer->tail_page = new_tail;
+
+ simple_rb_meta_inc(cpu_buffer->meta->pages_touched, 1);
+
+ return new_tail;
+}
+
+static unsigned long rb_event_size(unsigned long length)
+{
+ struct ring_buffer_event *event;
+
+ return length + RB_EVNT_HDR_SIZE + sizeof(event->array[0]);
+}
+
+static struct ring_buffer_event *
+rb_event_add_ts_extend(struct ring_buffer_event *event, u64 delta)
+{
+ event->type_len = RINGBUF_TYPE_TIME_EXTEND;
+ event->time_delta = delta & TS_MASK;
+ event->array[0] = delta >> TS_SHIFT;
+
+ return (struct ring_buffer_event *)((unsigned long)event + 8);
+}
+
+static struct ring_buffer_event *
+simple_rb_reserve_next(struct simple_rb_per_cpu *cpu_buffer, unsigned long length, u64 timestamp)
+{
+ unsigned long ts_ext_size = 0, event_size = rb_event_size(length);
+ struct simple_buffer_page *tail = cpu_buffer->tail_page;
+ struct ring_buffer_event *event;
+ u32 write, prev_write;
+ u64 time_delta;
+
+ time_delta = timestamp - cpu_buffer->write_stamp;
+
+ if (test_time_stamp(time_delta))
+ ts_ext_size = 8;
+
+ prev_write = tail->write;
+ write = prev_write + event_size + ts_ext_size;
+
+ if (unlikely(write > (PAGE_SIZE - BUF_PAGE_HDR_SIZE)))
+ tail = simple_rb_move_tail(cpu_buffer);
+
+ if (!tail->entries) {
+ tail->page->time_stamp = timestamp;
+ time_delta = 0;
+ ts_ext_size = 0;
+ write = event_size;
+ prev_write = 0;
+ }
+
+ tail->write = write;
+ tail->entries++;
+
+ cpu_buffer->write_stamp = timestamp;
+
+ event = (struct ring_buffer_event *)(tail->page->data + prev_write);
+ if (ts_ext_size) {
+ event = rb_event_add_ts_extend(event, time_delta);
+ time_delta = 0;
+ }
+
+ event->type_len = 0;
+ event->time_delta = time_delta;
+ event->array[0] = event_size - RB_EVNT_HDR_SIZE;
+
+ return event;
+}
+
+void *simple_ring_buffer_reserve(struct simple_rb_per_cpu *cpu_buffer, unsigned long length,
+ u64 timestamp)
+{
+ struct ring_buffer_event *rb_event;
+
+ if (cmpxchg(&cpu_buffer->status, SIMPLE_RB_READY, SIMPLE_RB_WRITING) != SIMPLE_RB_READY)
+ return NULL;
+
+ rb_event = simple_rb_reserve_next(cpu_buffer, length, timestamp);
+
+ return &rb_event->array[1];
+}
+
+void simple_ring_buffer_commit(struct simple_rb_per_cpu *cpu_buffer)
+{
+ local_set(&cpu_buffer->tail_page->page->commit,
+ cpu_buffer->tail_page->write);
+ simple_rb_meta_inc(cpu_buffer->meta->entries, 1);
+
+ /*
+ * Paired with simple_rb_enable_tracing() to ensure data is
+ * written to the ring-buffer before teardown.
+ */
+ smp_store_release(&cpu_buffer->status, SIMPLE_RB_READY);
+}
+
+static u32 simple_rb_enable_tracing(struct simple_rb_per_cpu *cpu_buffer, bool enable)
+{
+ u32 prev_status;
+
+ if (enable)
+ return cmpxchg(&cpu_buffer->status, SIMPLE_RB_UNAVAILABLE, SIMPLE_RB_READY);
+
+ /* Wait for the buffer to be released */
+ do {
+ prev_status = cmpxchg_acquire(&cpu_buffer->status,
+ SIMPLE_RB_READY,
+ SIMPLE_RB_UNAVAILABLE);
+ } while (prev_status == SIMPLE_RB_WRITING);
+
+ return prev_status;
+}
+
+int simple_ring_buffer_reset(struct simple_rb_per_cpu *cpu_buffer)
+{
+ struct simple_buffer_page *bpage;
+ u32 prev_status;
+ int ret;
+
+ if (!simple_rb_loaded(cpu_buffer))
+ return -ENODEV;
+
+ prev_status = simple_rb_enable_tracing(cpu_buffer, false);
+
+ ret = simple_rb_find_head(cpu_buffer);
+ if (ret)
+ return ret;
+
+ bpage = cpu_buffer->tail_page = cpu_buffer->head_page;
+ do {
+ simple_bpage_reset(bpage);
+ bpage = simple_bpage_next_page(bpage);
+ } while (bpage != cpu_buffer->head_page);
+
+ simple_bpage_reset(cpu_buffer->reader_page);
+
+ cpu_buffer->last_overrun = 0;
+ cpu_buffer->write_stamp = 0;
+
+ cpu_buffer->meta->reader.read = 0;
+ cpu_buffer->meta->reader.lost_events = 0;
+ cpu_buffer->meta->entries = 0;
+ cpu_buffer->meta->overrun = 0;
+ cpu_buffer->meta->read = 0;
+ cpu_buffer->meta->pages_lost = 0;
+ cpu_buffer->meta->pages_touched = 0;
+
+ if (prev_status == SIMPLE_RB_READY)
+ simple_rb_enable_tracing(cpu_buffer, true);
+
+ return 0;
+}
+
+int simple_ring_buffer_init(struct simple_rb_per_cpu *cpu_buffer, struct simple_buffer_page *bpages,
+ const struct ring_buffer_desc *desc)
+{
+ struct simple_buffer_page *bpage = bpages;
+ int i;
+
+ /* At least one reader page and two pages in the ring-buffer */
+ if (desc->nr_page_va < 3)
+ return -EINVAL;
+
+ memset(cpu_buffer, 0, sizeof(*cpu_buffer));
+
+ cpu_buffer->bpages = bpages;
+
+ cpu_buffer->meta = (void *)desc->meta_va;
+ memset(cpu_buffer->meta, 0, sizeof(*cpu_buffer->meta));
+ cpu_buffer->meta->meta_page_size = PAGE_SIZE;
+ cpu_buffer->meta->nr_subbufs = desc->nr_page_va;
+
+ /* The reader page is not part of the ring initially */
+ simple_bpage_init(bpage, desc->page_va[0]);
+ bpage->id = 0;
+
+ cpu_buffer->nr_pages = 1;
+
+ cpu_buffer->reader_page = bpage;
+ cpu_buffer->tail_page = bpage + 1;
+ cpu_buffer->head_page = bpage + 1;
+
+ for (i = 1; i < desc->nr_page_va; i++) {
+ simple_bpage_init(++bpage, desc->page_va[i]);
+
+ bpage->link.next = &(bpage + 1)->link;
+ bpage->link.prev = &(bpage - 1)->link;
+ bpage->id = i;
+
+ cpu_buffer->nr_pages = i + 1;
+ }
+
+ /* Close the ring */
+ bpage->link.next = &cpu_buffer->tail_page->link;
+ cpu_buffer->tail_page->link.prev = &bpage->link;
+
+ /* The last init'ed page points to the head page */
+ simple_bpage_set_head_link(bpage);
+
+ return 0;
+}
+
+void simple_ring_buffer_unload(struct simple_rb_per_cpu *cpu_buffer)
+{
+ if (!simple_rb_loaded(cpu_buffer))
+ return;
+
+ simple_rb_enable_tracing(cpu_buffer, false);
+
+ cpu_buffer->bpages = NULL;
+}
+
+int simple_ring_buffer_enable_tracing(struct simple_rb_per_cpu *cpu_buffer, bool enable)
+{
+ if (!simple_rb_loaded(cpu_buffer))
+ return -ENODEV;
+
+ simple_rb_enable_tracing(cpu_buffer, enable);
+
+ return 0;
+}
--
2.51.0.618.g983fd99d29-goog
^ permalink raw reply related [flat|nested] 33+ messages in thread* [PATCH v7 14/28] tracing: Add a trace remote module for testing
2025-10-03 13:37 [PATCH v7 00/28] Tracefs support for pKVM Vincent Donnefort
` (12 preceding siblings ...)
2025-10-03 13:38 ` [PATCH v7 13/28] tracing: Introduce simple_ring_buffer Vincent Donnefort
@ 2025-10-03 13:38 ` Vincent Donnefort
2025-10-16 21:06 ` Steven Rostedt
2025-10-03 13:38 ` [PATCH v7 15/28] tracing: selftests: Add trace remote tests Vincent Donnefort
` (13 subsequent siblings)
27 siblings, 1 reply; 33+ messages in thread
From: Vincent Donnefort @ 2025-10-03 13:38 UTC (permalink / raw)
To: rostedt, mhiramat, mathieu.desnoyers, linux-trace-kernel, maz,
oliver.upton, joey.gouly, suzuki.poulose, yuzenghui
Cc: kvmarm, linux-arm-kernel, jstultz, qperret, will, aneesh.kumar,
kernel-team, linux-kernel, Vincent Donnefort
Add a module to help test the tracefs support for trace remotes. This
module:
* Uses simple_ring_buffer to write into a ring-buffer.
* Declares a single "selftest" event that can be triggered from
user-space.
* Registers a "test" trace remote.
This is intended to be used by trace remote selftests.
Signed-off-by: Vincent Donnefort <vdonnefort@google.com>
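The write path of the module boils down to something like the following
sketch (the event id and the layout of the "selftest" event are
assumptions for illustration):

static void remote_test_write_event(u64 id)
{
	struct remote_event_format_selftest *evt;
	struct simple_rb_per_cpu *cpu_buffer;

	guard(mutex)(&simple_rbs_lock);

	cpu_buffer = *this_cpu_ptr(&simple_rbs);
	if (!cpu_buffer)
		return;

	evt = simple_ring_buffer_reserve(cpu_buffer, sizeof(*evt),
					 trace_clock_global());
	if (!evt)
		return;

	evt->hdr.id = 1;	/* the "selftest" event id, assumed */
	evt->id = id;
	simple_ring_buffer_commit(cpu_buffer);
}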
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 918afcc1fcaf..52131d89993c 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -1244,4 +1244,12 @@ config TRACE_REMOTE
config SIMPLE_RING_BUFFER
bool
+config TRACE_REMOTE_TEST
+ tristate "Test module for remote tracing"
+ select TRACE_REMOTE
+ select SIMPLE_RING_BUFFER
+ help
+ This trace remote includes a ring-buffer writer implementation using
+ "simple_ring_buffer". This is solely intending for testing.
+
endif # FTRACE
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 03d7d80a9436..53534447e70b 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -112,4 +112,6 @@ obj-$(CONFIG_RV) += rv/
obj-$(CONFIG_TRACE_REMOTE) += trace_remote.o
obj-$(CONFIG_SIMPLE_RING_BUFFER) += simple_ring_buffer.o
+obj-$(CONFIG_TRACE_REMOTE_TEST) += remote_test.o
+
libftrace-y := ftrace.o
diff --git a/kernel/trace/remote_test.c b/kernel/trace/remote_test.c
new file mode 100644
index 000000000000..059127489c99
--- /dev/null
+++ b/kernel/trace/remote_test.c
@@ -0,0 +1,257 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2025 - Google LLC
+ * Author: Vincent Donnefort <vdonnefort@google.com>
+ */
+
+#include <linux/module.h>
+#include <linux/simple_ring_buffer.h>
+#include <linux/trace_remote.h>
+#include <linux/tracefs.h>
+#include <linux/types.h>
+
+#define REMOTE_EVENT_INCLUDE_FILE kernel/trace/remote_test_events.h
+#include <trace/define_remote_events.h>
+
+static DEFINE_PER_CPU(struct simple_rb_per_cpu *, simple_rbs);
+static struct trace_buffer_desc *remote_test_buffer_desc;
+
+/*
+ * The trace_remote lock already serializes accesses from the trace_remote_callbacks.
+ * However write_event can still race with load/unload.
+ */
+static DEFINE_MUTEX(simple_rbs_lock);
+
+static int remote_test_load_simple_rb(int cpu, struct ring_buffer_desc *rb_desc)
+{
+ struct simple_rb_per_cpu *cpu_buffer;
+ struct simple_buffer_page *bpages;
+ int ret = -ENOMEM;
+
+ cpu_buffer = kmalloc(sizeof(*cpu_buffer), GFP_KERNEL);
+ if (!cpu_buffer)
+ return ret;
+
+ bpages = kmalloc_array(rb_desc->nr_page_va, sizeof(*bpages), GFP_KERNEL);
+ if (!bpages)
+ goto err_free_cpu_buffer;
+
+ ret = simple_ring_buffer_init(cpu_buffer, bpages, rb_desc);
+ if (ret)
+ goto err_free_bpages;
+
+ scoped_guard(mutex, &simple_rbs_lock)
+ *per_cpu_ptr(&simple_rbs, cpu) = cpu_buffer;
+
+ return 0;
+
+err_free_bpages:
+ kfree(bpages);
+
+err_free_cpu_buffer:
+ kfree(cpu_buffer);
+
+ return ret;
+}
+
+static void remote_test_unload_simple_rb(int cpu)
+{
+ struct simple_rb_per_cpu *cpu_buffer = *per_cpu_ptr(&simple_rbs, cpu);
+ struct simple_buffer_page *bpages;
+
+ if (!cpu_buffer)
+ return;
+
+ guard(mutex)(&simple_rbs_lock);
+
+ bpages = cpu_buffer->bpages;
+ simple_ring_buffer_unload(cpu_buffer);
+ kfree(bpages);
+ kfree(cpu_buffer);
+ *per_cpu_ptr(&simple_rbs, cpu) = NULL;
+}
+
+static struct trace_buffer_desc *remote_test_load(unsigned long size, void *unused)
+{
+ struct ring_buffer_desc *rb_desc;
+ struct trace_buffer_desc *desc;
+ size_t desc_size;
+ int cpu, ret;
+
+ if (WARN_ON(remote_test_buffer_desc))
+ return ERR_PTR(-EINVAL);
+
+ desc_size = trace_buffer_desc_size(size, num_possible_cpus());
+ if (desc_size == SIZE_MAX) {
+ ret = -E2BIG;
+ goto err_out;
+ }
+
+ desc = kmalloc(desc_size, GFP_KERNEL);
+ if (!desc) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
+ ret = trace_remote_alloc_buffer(desc, desc_size, size, cpu_possible_mask);
+ if (ret)
+ goto err_free_desc;
+
+ for_each_ring_buffer_desc(rb_desc, cpu, desc) {
+ ret = remote_test_load_simple_rb(rb_desc->cpu, rb_desc);
+ if (ret)
+ goto err;
+ }
+
+ remote_test_buffer_desc = desc;
+
+ return remote_test_buffer_desc;
+
+err:
+ for_each_ring_buffer_desc(rb_desc, cpu, desc)
+ remote_test_unload_simple_rb(rb_desc->cpu);
+ trace_remote_free_buffer(desc);
+
+err_free_desc:
+ kfree(desc);
+
+err_out:
+ return ERR_PTR(ret);
+}
+
+static void remote_test_unload(struct trace_buffer_desc *desc, void *unused)
+{
+ struct ring_buffer_desc *rb_desc;
+ int cpu;
+
+ if (WARN_ON(desc != remote_test_buffer_desc))
+ return;
+
+ for_each_ring_buffer_desc(rb_desc, cpu, desc)
+ remote_test_unload_simple_rb(rb_desc->cpu);
+
+ remote_test_buffer_desc = NULL;
+ trace_remote_free_buffer(desc);
+ kfree(desc);
+}
+
+static int remote_test_enable_tracing(bool enable, void *unused)
+{
+ struct ring_buffer_desc *rb_desc;
+ int cpu;
+
+ if (!remote_test_buffer_desc)
+ return -ENODEV;
+
+ for_each_ring_buffer_desc(rb_desc, cpu, remote_test_buffer_desc)
+ WARN_ON(simple_ring_buffer_enable_tracing(*per_cpu_ptr(&simple_rbs, rb_desc->cpu),
+ enable));
+ return 0;
+}
+
+static int remote_test_swap_reader_page(unsigned int cpu, void *unused)
+{
+ struct simple_rb_per_cpu *cpu_buffer;
+
+ if (cpu >= NR_CPUS)
+ return -EINVAL;
+
+ cpu_buffer = *per_cpu_ptr(&simple_rbs, cpu);
+ if (!cpu_buffer)
+ return -EINVAL;
+
+ return simple_ring_buffer_swap_reader_page(cpu_buffer);
+}
+
+static int remote_test_reset(unsigned int cpu, void *unused)
+{
+ struct simple_rb_per_cpu *cpu_buffer;
+
+ if (cpu >= NR_CPUS)
+ return -EINVAL;
+
+ cpu_buffer = *per_cpu_ptr(&simple_rbs, cpu);
+ if (!cpu_buffer)
+ return -EINVAL;
+
+ return simple_ring_buffer_reset(cpu_buffer);
+}
+
+static int remote_test_enable_event(unsigned short id, bool enable, void *unused)
+{
+ if (id != REMOTE_TEST_EVENT_ID)
+ return -EINVAL;
+
+ /*
+ * Let's just use the struct remote_event enabled field that is turned on and off by
+ * trace_remote. This is a bit racy but good enough for a simple test module.
+ */
+ return 0;
+}
+
+static ssize_t
+write_event_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *pos)
+{
+ struct remote_event_format_selftest *evt_test;
+ struct simple_rb_per_cpu *cpu_buffer;
+ unsigned long val;
+ int ret;
+
+ ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
+ if (ret)
+ return ret;
+
+ guard(mutex)(&simple_rbs_lock);
+
+ if (!remote_event_selftest.enabled)
+ return -ENODEV;
+
+ cpu_buffer = *this_cpu_ptr(&simple_rbs);
+ if (!cpu_buffer)
+ return -ENODEV;
+
+ evt_test = simple_ring_buffer_reserve(cpu_buffer,
+ sizeof(struct remote_event_format_selftest),
+ trace_clock_global());
+ if (!evt_test)
+ return -ENODEV;
+
+ evt_test->hdr.id = REMOTE_TEST_EVENT_ID;
+ evt_test->id = val;
+
+ simple_ring_buffer_commit(cpu_buffer);
+
+ return cnt;
+}
+
+static const struct file_operations write_event_fops = {
+ .write = write_event_write,
+};
+
+static int remote_test_init_tracefs(struct dentry *d, void *unused)
+{
+ return tracefs_create_file("write_event", 0200, d, NULL, &write_event_fops) ?
+ 0 : -ENOMEM;
+}
+
+static struct trace_remote_callbacks trace_remote_callbacks = {
+ .init = remote_test_init_tracefs,
+ .load_trace_buffer = remote_test_load,
+ .unload_trace_buffer = remote_test_unload,
+ .enable_tracing = remote_test_enable_tracing,
+ .swap_reader_page = remote_test_swap_reader_page,
+ .reset = remote_test_reset,
+ .enable_event = remote_test_enable_event,
+};
+
+static int __init remote_test_init(void)
+{
+ return trace_remote_register("test", &trace_remote_callbacks, NULL,
+ &remote_event_selftest, 1);
+}
+
+module_init(remote_test_init);
+
+MODULE_DESCRIPTION("Test module for the trace remote interface");
+MODULE_AUTHOR("Vincent Donnefort");
+MODULE_LICENSE("GPL");
diff --git a/kernel/trace/remote_test_events.h b/kernel/trace/remote_test_events.h
new file mode 100644
index 000000000000..bb68aac4a25c
--- /dev/null
+++ b/kernel/trace/remote_test_events.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#define REMOTE_TEST_EVENT_ID 1
+
+REMOTE_EVENT(selftest, REMOTE_TEST_EVENT_ID,
+ RE_STRUCT(
+ re_field(u64, id)
+ ),
+ RE_PRINTK("id=%lld", __entry->id)
+);
--
2.51.0.618.g983fd99d29-goog
^ permalink raw reply related [flat|nested] 33+ messages in thread
* Re: [PATCH v7 14/28] tracing: Add a trace remote module for testing
2025-10-03 13:38 ` [PATCH v7 14/28] tracing: Add a trace remote module for testing Vincent Donnefort
@ 2025-10-16 21:06 ` Steven Rostedt
2025-10-16 21:11 ` Steven Rostedt
0 siblings, 1 reply; 33+ messages in thread
From: Steven Rostedt @ 2025-10-16 21:06 UTC (permalink / raw)
To: Vincent Donnefort
Cc: mhiramat, mathieu.desnoyers, linux-trace-kernel, maz,
oliver.upton, joey.gouly, suzuki.poulose, yuzenghui, kvmarm,
linux-arm-kernel, jstultz, qperret, will, aneesh.kumar,
kernel-team, linux-kernel
On Fri, 3 Oct 2025 14:38:11 +0100
Vincent Donnefort <vdonnefort@google.com> wrote:
> diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
> index 918afcc1fcaf..52131d89993c 100644
> --- a/kernel/trace/Kconfig
> +++ b/kernel/trace/Kconfig
> @@ -1244,4 +1244,12 @@ config TRACE_REMOTE
> config SIMPLE_RING_BUFFER
> bool
>
> +config TRACE_REMOTE_TEST
> + tristate "Test module for remote tracing"
> + select TRACE_REMOTE
> + select SIMPLE_RING_BUFFER
> + help
> + This trace remote includes a ring-buffer writer implementation using
> + "simple_ring_buffer". This is solely intending for testing.
> +
Nit, this should go up a few places so that it's with the other "test module" selections.
-- Steve
^ permalink raw reply [flat|nested] 33+ messages in thread
* Re: [PATCH v7 14/28] tracing: Add a trace remote module for testing
2025-10-16 21:06 ` Steven Rostedt
@ 2025-10-16 21:11 ` Steven Rostedt
2025-10-17 8:36 ` Vincent Donnefort
0 siblings, 1 reply; 33+ messages in thread
From: Steven Rostedt @ 2025-10-16 21:11 UTC (permalink / raw)
To: Vincent Donnefort
Cc: mhiramat, mathieu.desnoyers, linux-trace-kernel, maz,
oliver.upton, joey.gouly, suzuki.poulose, yuzenghui, kvmarm,
linux-arm-kernel, jstultz, qperret, will, aneesh.kumar,
kernel-team, linux-kernel
On Thu, 16 Oct 2025 17:06:45 -0400
Steven Rostedt <rostedt@goodmis.org> wrote:
> On Fri, 3 Oct 2025 14:38:11 +0100
> Vincent Donnefort <vdonnefort@google.com> wrote:
>
> > diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
> > index 918afcc1fcaf..52131d89993c 100644
> > --- a/kernel/trace/Kconfig
> > +++ b/kernel/trace/Kconfig
> > @@ -1244,4 +1244,12 @@ config TRACE_REMOTE
> > config SIMPLE_RING_BUFFER
> > bool
> >
> > +config TRACE_REMOTE_TEST
> > + tristate "Test module for remote tracing"
> > + select TRACE_REMOTE
> > + select SIMPLE_RING_BUFFER
> > + help
> > + This trace remote includes a ring-buffer writer implementation using
> > + "simple_ring_buffer". This is solely intending for testing.
> > +
>
> Nit, this should go up a few places so that it's with the other "test module" selections.
And when I tried to build it, I hit this:
GEN .vmlinux.objs
MODPOST Module.symvers
ERROR: modpost: "simple_ring_buffer_reserve" [kernel/trace/remote_test.ko] undefined!
ERROR: modpost: "simple_ring_buffer_commit" [kernel/trace/remote_test.ko] undefined!
ERROR: modpost: "simple_ring_buffer_unload" [kernel/trace/remote_test.ko] undefined!
ERROR: modpost: "trace_remote_free_buffer" [kernel/trace/remote_test.ko] undefined!
ERROR: modpost: "trace_remote_alloc_buffer" [kernel/trace/remote_test.ko] undefined!
ERROR: modpost: "simple_ring_buffer_init" [kernel/trace/remote_test.ko] undefined!
ERROR: modpost: "trace_remote_register" [kernel/trace/remote_test.ko] undefined!
ERROR: modpost: "simple_ring_buffer_reset" [kernel/trace/remote_test.ko] undefined!
ERROR: modpost: "simple_ring_buffer_swap_reader_page" [kernel/trace/remote_test.ko] undefined!
ERROR: modpost: "simple_ring_buffer_enable_tracing" [kernel/trace/remote_test.ko] undefined!
WARNING: modpost: suppressed 1 unresolved symbol warnings because there were too many)
make[3]: *** [/work/git/linux-trace.git/scripts/Makefile.modpost:147: Module.symvers] Error 1
make[2]: *** [/work/git/linux-trace.git/Makefile:1960: modpost] Error 2
make[1]: *** [/work/git/linux-trace.git/Makefile:248: __sub-make] Error 2
make[1]: Leaving directory '/work/build/nobackup/debiantesting-x86-64'
make: *** [Makefile:248: __sub-make] Error 2
-- Steve
^ permalink raw reply [flat|nested] 33+ messages in thread
* Re: [PATCH v7 14/28] tracing: Add a trace remote module for testing
2025-10-16 21:11 ` Steven Rostedt
@ 2025-10-17 8:36 ` Vincent Donnefort
2025-10-17 9:14 ` Steven Rostedt
0 siblings, 1 reply; 33+ messages in thread
From: Vincent Donnefort @ 2025-10-17 8:36 UTC (permalink / raw)
To: Steven Rostedt
Cc: mhiramat, mathieu.desnoyers, linux-trace-kernel, maz,
oliver.upton, joey.gouly, suzuki.poulose, yuzenghui, kvmarm,
linux-arm-kernel, jstultz, qperret, will, aneesh.kumar,
kernel-team, linux-kernel
On Thu, Oct 16, 2025 at 05:11:55PM -0400, Steven Rostedt wrote:
> On Thu, 16 Oct 2025 17:06:45 -0400
> Steven Rostedt <rostedt@goodmis.org> wrote:
>
> > On Fri, 3 Oct 2025 14:38:11 +0100
> > Vincent Donnefort <vdonnefort@google.com> wrote:
> >
> > > diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
> > > index 918afcc1fcaf..52131d89993c 100644
> > > --- a/kernel/trace/Kconfig
> > > +++ b/kernel/trace/Kconfig
> > > @@ -1244,4 +1244,12 @@ config TRACE_REMOTE
> > > config SIMPLE_RING_BUFFER
> > > bool
> > >
> > > +config TRACE_REMOTE_TEST
> > > + tristate "Test module for remote tracing"
> > > + select TRACE_REMOTE
> > > + select SIMPLE_RING_BUFFER
> > > + help
> > > + This trace remote includes a ring-buffer writer implementation using
> > > + "simple_ring_buffer". This is solely intending for testing.
> > > +
> >
> > Nit, this should go up a few places so that it's with the other "test module" selections.
>
> And when I tried to build it, I hit this:
>
> GEN .vmlinux.objs
> MODPOST Module.symvers
> ERROR: modpost: "simple_ring_buffer_reserve" [kernel/trace/remote_test.ko] undefined!
> ERROR: modpost: "simple_ring_buffer_commit" [kernel/trace/remote_test.ko] undefined!
> ERROR: modpost: "simple_ring_buffer_unload" [kernel/trace/remote_test.ko] undefined!
> ERROR: modpost: "trace_remote_free_buffer" [kernel/trace/remote_test.ko] undefined!
> ERROR: modpost: "trace_remote_alloc_buffer" [kernel/trace/remote_test.ko] undefined!
> ERROR: modpost: "simple_ring_buffer_init" [kernel/trace/remote_test.ko] undefined!
> ERROR: modpost: "trace_remote_register" [kernel/trace/remote_test.ko] undefined!
> ERROR: modpost: "simple_ring_buffer_reset" [kernel/trace/remote_test.ko] undefined!
> ERROR: modpost: "simple_ring_buffer_swap_reader_page" [kernel/trace/remote_test.ko] undefined!
> ERROR: modpost: "simple_ring_buffer_enable_tracing" [kernel/trace/remote_test.ko] undefined!
> WARNING: modpost: suppressed 1 unresolved symbol warnings because there were too many)
> make[3]: *** [/work/git/linux-trace.git/scripts/Makefile.modpost:147: Module.symvers] Error 1
> make[2]: *** [/work/git/linux-trace.git/Makefile:1960: modpost] Error 2
> make[1]: *** [/work/git/linux-trace.git/Makefile:248: __sub-make] Error 2
> make[1]: Leaving directory '/work/build/nobackup/debiantesting-x86-64'
> make: *** [Makefile:248: __sub-make] Error 2
And of course, I forgot to check CONFIG_TRACE_REMOTE_TEST=m ...
The following snippet should do.
Do you want a v8 now (with your previous comment addressed) or shall I wait a bit more?
--
diff --git a/fs/tracefs/inode.c b/fs/tracefs/inode.c
index 0c023941a316..aa305be834f9 100644
--- a/fs/tracefs/inode.c
+++ b/fs/tracefs/inode.c
@@ -665,6 +665,7 @@ struct dentry *tracefs_create_file(const char *name, umode_t mode,
fsnotify_create(d_inode(dentry->d_parent), dentry);
return tracefs_end_creating(dentry);
}
+EXPORT_SYMBOL_GPL(tracefs_create_file);
static struct dentry *__create_dir(const char *name, struct dentry *parent,
const struct inode_operations *ops)
diff --git a/kernel/trace/simple_ring_buffer.c b/kernel/trace/simple_ring_buffer.c
index c2ec6017c37c..02303161aab7 100644
--- a/kernel/trace/simple_ring_buffer.c
+++ b/kernel/trace/simple_ring_buffer.c
@@ -162,6 +162,7 @@ int simple_ring_buffer_swap_reader_page(struct simple_rb_per_cpu *cpu_buffer)
return 0;
}
+EXPORT_SYMBOL_GPL(simple_ring_buffer_swap_reader_page);
static struct simple_buffer_page *simple_rb_move_tail(struct simple_rb_per_cpu *cpu_buffer)
{
@@ -267,6 +268,7 @@ void *simple_ring_buffer_reserve(struct simple_rb_per_cpu *cpu_buffer, unsigned
return &rb_event->array[1];
}
+EXPORT_SYMBOL_GPL(simple_ring_buffer_reserve);
void simple_ring_buffer_commit(struct simple_rb_per_cpu *cpu_buffer)
{
@@ -280,6 +282,7 @@ void simple_ring_buffer_commit(struct simple_rb_per_cpu *cpu_buffer)
*/
smp_store_release(&cpu_buffer->status, SIMPLE_RB_READY);
}
+EXPORT_SYMBOL_GPL(simple_ring_buffer_commit);
static u32 simple_rb_enable_tracing(struct simple_rb_per_cpu *cpu_buffer, bool enable)
{
@@ -337,6 +340,7 @@ int simple_ring_buffer_reset(struct simple_rb_per_cpu *cpu_buffer)
return 0;
}
+EXPORT_SYMBOL_GPL(simple_ring_buffer_reset);
int __simple_ring_buffer_init(struct simple_rb_per_cpu *cpu_buffer,
struct simple_buffer_page *bpages,
@@ -427,6 +431,7 @@ int simple_ring_buffer_init(struct simple_rb_per_cpu *cpu_buffer, struct simple_
{
return __simple_ring_buffer_init(cpu_buffer, bpages, desc, __load_page, __unload_page);
}
+EXPORT_SYMBOL_GPL(simple_ring_buffer_init);
void __simple_ring_buffer_unload(struct simple_rb_per_cpu *cpu_buffer,
void (*unload_page)(void *))
@@ -449,6 +454,7 @@ void simple_ring_buffer_unload(struct simple_rb_per_cpu *cpu_buffer)
{
return __simple_ring_buffer_unload(cpu_buffer, __unload_page);
}
+EXPORT_SYMBOL_GPL(simple_ring_buffer_unload);
int simple_ring_buffer_enable_tracing(struct simple_rb_per_cpu *cpu_buffer, bool enable)
{
@@ -459,3 +465,4 @@ int simple_ring_buffer_enable_tracing(struct simple_rb_per_cpu *cpu_buffer, bool
return 0;
}
+EXPORT_SYMBOL_GPL(simple_ring_buffer_enable_tracing);
diff --git a/kernel/trace/trace_remote.c b/kernel/trace/trace_remote.c
index dc6bc387ebca..e54cc3e75dc5 100644
--- a/kernel/trace/trace_remote.c
+++ b/kernel/trace/trace_remote.c
@@ -895,6 +895,7 @@ int trace_remote_register(const char *name, struct trace_remote_callbacks *cbs,
return ret;
}
+EXPORT_SYMBOL_GPL(trace_remote_register);
void trace_remote_free_buffer(struct trace_buffer_desc *desc)
{
@@ -910,6 +911,7 @@ void trace_remote_free_buffer(struct trace_buffer_desc *desc)
free_page(rb_desc->page_va[id]);
}
}
+EXPORT_SYMBOL_GPL(trace_remote_free_buffer);
int trace_remote_alloc_buffer(struct trace_buffer_desc *desc, size_t desc_size, size_t buffer_size,
const struct cpumask *cpumask)
@@ -960,6 +962,7 @@ int trace_remote_alloc_buffer(struct trace_buffer_desc *desc, size_t desc_size,
trace_remote_free_buffer(desc);
return ret;
}
+EXPORT_SYMBOL_GPL(trace_remote_alloc_buffer);
static int
trace_remote_enable_event(struct trace_remote *remote, struct remote_event *evt, bool enable)
^ permalink raw reply related [flat|nested] 33+ messages in thread
* Re: [PATCH v7 14/28] tracing: Add a trace remote module for testing
2025-10-17 8:36 ` Vincent Donnefort
@ 2025-10-17 9:14 ` Steven Rostedt
0 siblings, 0 replies; 33+ messages in thread
From: Steven Rostedt @ 2025-10-17 9:14 UTC (permalink / raw)
To: Vincent Donnefort
Cc: mhiramat, mathieu.desnoyers, linux-trace-kernel, maz,
oliver.upton, joey.gouly, suzuki.poulose, yuzenghui, kvmarm,
linux-arm-kernel, jstultz, qperret, will, aneesh.kumar,
kernel-team, linux-kernel
On Fri, 17 Oct 2025 09:36:27 +0100
Vincent Donnefort <vdonnefort@google.com> wrote:
> And of course, I forgot to check CONFIG_TRACE_REMOTE_TEST=m ...
It also needs to export symbols.
>
> The following snippet should do.
>
> Do you want a v8 now (and with your previous comment) or shall I wait a bit more?
Wait a little more. I'm currently taking a long weekend and will likely
play with it a bit more next week.
-- Steve
^ permalink raw reply [flat|nested] 33+ messages in thread
* [PATCH v7 15/28] tracing: selftests: Add trace remote tests
2025-10-03 13:37 [PATCH v7 00/28] Tracefs support for pKVM Vincent Donnefort
` (13 preceding siblings ...)
2025-10-03 13:38 ` [PATCH v7 14/28] tracing: Add a trace remote module for testing Vincent Donnefort
@ 2025-10-03 13:38 ` Vincent Donnefort
2025-10-03 13:38 ` [PATCH v7 16/28] Documentation: tracing: Add tracing remotes Vincent Donnefort
` (12 subsequent siblings)
27 siblings, 0 replies; 33+ messages in thread
From: Vincent Donnefort @ 2025-10-03 13:38 UTC (permalink / raw)
To: rostedt, mhiramat, mathieu.desnoyers, linux-trace-kernel, maz,
oliver.upton, joey.gouly, suzuki.poulose, yuzenghui
Cc: kvmarm, linux-arm-kernel, jstultz, qperret, will, aneesh.kumar,
kernel-team, linux-kernel, Vincent Donnefort, Shuah Khan,
linux-kselftest
Exercise the tracefs interface for trace remotes with a set of tests to
check:
* loading/unloading (unloading.tc)
* reset (reset.tc)
* size changes (buffer_size.tc)
* consuming read (trace_pipe.tc)
* non-consuming read (trace.tc)
Cc: Shuah Khan <skhan@linuxfoundation.org>
Cc: linux-kselftest@vger.kernel.org
Signed-off-by: Vincent Donnefort <vdonnefort@google.com>
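These tests hook into the existing ftrace selftest runner; with
CONFIG_TRACE_REMOTE_TEST enabled, they can be run from
tools/testing/selftests/ftrace/ with e.g. ./ftracetest test.d/remotes/.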
diff --git a/tools/testing/selftests/ftrace/test.d/remotes/buffer_size.tc b/tools/testing/selftests/ftrace/test.d/remotes/buffer_size.tc
new file mode 100644
index 000000000000..1a43280ffa97
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/remotes/buffer_size.tc
@@ -0,0 +1,25 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: Test trace remote buffer size
+# requires: remotes/test
+
+. $TEST_DIR/remotes/functions
+
+test_buffer_size()
+{
+ echo 0 > tracing_on
+ assert_unloaded
+
+ echo 4096 > buffer_size_kb
+ echo 1 > tracing_on
+ assert_loaded
+
+ echo 0 > tracing_on
+ echo 7 > buffer_size_kb
+}
+
+if [ -z "$SOURCE_REMOTE_TEST" ]; then
+ set -e
+ setup_remote_test
+ test_buffer_size
+fi
diff --git a/tools/testing/selftests/ftrace/test.d/remotes/functions b/tools/testing/selftests/ftrace/test.d/remotes/functions
new file mode 100644
index 000000000000..97a09d564a34
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/remotes/functions
@@ -0,0 +1,88 @@
+# SPDX-License-Identifier: GPL-2.0
+
+setup_remote()
+{
+ local name=$1
+
+ [ -e $TRACING_DIR/remotes/$name/write_event ] || exit_unresolved
+
+ cd remotes/$name/
+ echo 0 > tracing_on
+ clear_trace
+ echo 7 > buffer_size_kb
+ echo 0 > events/enable
+ echo 1 > events/$name/selftest/enable
+ echo 1 > tracing_on
+}
+
+setup_remote_test()
+{
+ [ -d $TRACING_DIR/remotes/test/ ] || modprobe remote_test || exit_unresolved
+
+ setup_remote "test"
+}
+
+assert_loaded()
+{
+ grep -q "(loaded)" buffer_size_kb
+}
+
+assert_unloaded()
+{
+ grep -q "(unloaded)" buffer_size_kb
+}
+
+dump_trace_pipe()
+{
+ output=$(mktemp $TMPDIR/remote_test.XXXXXX)
+ cat trace_pipe > $output &
+ pid=$!
+ sleep 1
+ kill -1 $pid
+
+ echo $output
+}
+
+check_trace()
+{
+ start_id="$1"
+ end_id="$2"
+ file="$3"
+
+ # Ensure the file is not empty
+ test -n "$(head $file)"
+
+ prev_ts=0
+ id=0
+
+ # Only keep <timestamp> <id>
+ tmp=$(mktemp $TMPDIR/remote_test.XXXXXX)
+ sed -e 's/\[[0-9]*\]\s*\([0-9]*.[0-9]*\): [a-z]* id=\([0-9]*\)/\1 \2/' $file > $tmp
+
+ while IFS= read -r line; do
+ ts=$(echo $line | cut -d ' ' -f 1)
+ id=$(echo $line | cut -d ' ' -f 2)
+
+ test $(echo "$ts>$prev_ts" | bc) -eq 1
+ test $id -eq $start_id
+
+ prev_ts=$ts
+ start_id=$((start_id + 1))
+ done < $tmp
+
+ test $id -eq $end_id
+ rm $tmp
+}
+
+get_cpu_ids()
+{
+ sed -n 's/^processor\s*:\s*\([0-9]\+\).*/\1/p' /proc/cpuinfo
+}
+
+get_page_size() {
+ sed -ne 's/^.*data.*size:\([0-9][0-9]*\).*/\1/p' events/header_page
+}
+
+get_selftest_event_size() {
+ sed -ne 's/^.*field:.*;.*size:\([0-9][0-9]*\);.*/\1/p' events/*/selftest/format | awk '{s+=$1} END {print s}'
+}
diff --git a/tools/testing/selftests/ftrace/test.d/remotes/reset.tc b/tools/testing/selftests/ftrace/test.d/remotes/reset.tc
new file mode 100644
index 000000000000..4d176349b2bc
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/remotes/reset.tc
@@ -0,0 +1,88 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: Test trace remote reset
+# requires: remotes/test
+
+. $TEST_DIR/remotes/functions
+
+check_reset()
+{
+ write_event_path="write_event"
+ taskset=""
+
+ clear_trace
+
+ # Is the buffer empty?
+ output=$(dump_trace_pipe)
+ test $(wc -l $output | cut -d ' ' -f1) -eq 0
+
+ if pwd | grep -q "per_cpu/cpu"; then
+ write_event_path="../../write_event"
+ cpu_id=$(echo $(pwd) | sed -e 's/.*per_cpu\/cpu//')
+ taskset="taskset -c $cpu_id"
+ fi
+ rm $output
+
+ # Can we properly write a new event?
+ $taskset echo 7890 > $write_event_path
+ output=$(dump_trace_pipe)
+ test $(wc -l $output | cut -d ' ' -f1) -eq 1
+ grep -q "id=7890" $output
+ rm $output
+}
+
+test_global_interface()
+{
+ # Confidence check
+ echo 123456 > write_event
+ output=$(dump_trace_pipe)
+ grep -q "id=123456" $output
+ rm $output
+
+ # Reset single event
+ echo 1 > write_event
+ check_reset
+
+ # Reset lost events
+ for i in $(seq 1 10000); do
+ echo 1 > write_event
+ done
+ check_reset
+}
+
+test_percpu_interface()
+{
+ [ "$(get_cpu_ids | wc -l)" -ge 2 ] || return 0
+
+ for cpu in $(get_cpu_ids); do
+ taskset -c $cpu echo 1 > write_event
+ done
+
+ check_non_empty=0
+ for cpu in $(get_cpu_ids); do
+ cd per_cpu/cpu$cpu/
+
+ if [ $check_non_empty -eq 0 ]; then
+ check_reset
+ check_non_empty=1
+ else
+ # Check we have only reset 1 CPU
+ output=$(dump_trace_pipe)
+ test $(wc -l $output | cut -d ' ' -f1) -eq 1
+ rm $output
+ fi
+ cd -
+ done
+}
+
+test_reset()
+{
+ test_global_interface
+ test_percpu_interface
+}
+
+if [ -z "$SOURCE_REMOTE_TEST" ]; then
+ set -e
+ setup_remote_test
+ test_reset
+fi
diff --git a/tools/testing/selftests/ftrace/test.d/remotes/trace.tc b/tools/testing/selftests/ftrace/test.d/remotes/trace.tc
new file mode 100644
index 000000000000..081133ec45ff
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/remotes/trace.tc
@@ -0,0 +1,127 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: Test trace remote non-consuming read
+# requires: remotes/test
+
+. $TEST_DIR/remotes/functions
+
+test_trace()
+{
+ echo 0 > tracing_on
+ assert_unloaded
+
+ echo 7 > buffer_size_kb
+ echo 1 > tracing_on
+ assert_loaded
+
+ # Simple test: Emit few events and try to read them
+ for i in $(seq 1 8); do
+ echo $i > write_event
+ done
+
+ check_trace 1 8 trace
+
+ #
+ # Test interaction with consuming read
+ #
+
+ cat trace_pipe > /dev/null &
+ pid=$!
+
+ sleep 1
+ kill $pid
+
+ test $(wc -l < trace) -eq 0
+
+ for i in $(seq 16 32); do
+ echo $i > write_event
+ done
+
+ check_trace 16 32 trace
+
+ #
+ # Test interaction with reset
+ #
+
+ echo 0 > trace
+
+ test $(wc -l < trace) -eq 0
+
+ for i in $(seq 1 8); do
+ echo $i > write_event
+ done
+
+ check_trace 1 8 trace
+
+ #
+ # Test interaction with lost events
+ #
+
+ # Ensure the writer is not on the reader page by reloading the buffer
+ echo 0 > tracing_on
+ echo 0 > trace
+ assert_unloaded
+ echo 1 > tracing_on
+ assert_loaded
+
+ # Ensure ring-buffer overflow by emitting events from the same CPU
+ for cpu in $(get_cpu_ids); do
+ break
+ done
+
+ events_per_page=$(($(get_page_size) / $(get_selftest_event_size))) # Approx: does not take TS into account
+ nr_events=$(($events_per_page * 2))
+ for i in $(seq 1 $nr_events); do
+ taskset -c $cpu echo $i > write_event
+ done
+
+ id=$(sed -n -e '1s/\[[0-9]*\]\s*[0-9]*.[0-9]*: [a-z]* id=\([0-9]*\)/\1/p' trace)
+ test $id -ne 1
+
+ check_trace $id $nr_events trace
+
+ #
+ # Test per-CPU interface
+ #
+ echo 0 > trace
+
+ for cpu in $(get_cpu_ids) ; do
+ taskset -c $cpu echo $cpu > write_event
+ done
+
+ for cpu in $(get_cpu_ids); do
+ cd per_cpu/cpu$cpu/
+
+ check_trace $cpu $cpu trace
+
+ cd - > /dev/null
+ done
+
+ #
+ # Test with hotplug
+ #
+
+ [ "$(get_cpu_ids | wc -l)" -ge 2 ] || return 0
+
+ echo 0 > trace
+
+ for cpu in $(get_cpu_ids); do
+ echo 0 > /sys/devices/system/cpu/cpu$cpu/online
+ break
+ done
+
+ for i in $(seq 1 8); do
+ echo $i > write_event
+ done
+
+ check_trace 1 8 trace
+
+ echo 1 > /sys/devices/system/cpu/cpu$cpu/online
+}
+
+if [ -z "$SOURCE_REMOTE_TEST" ]; then
+ set -e
+
+ setup_remote_test
+ test_trace
+fi
diff --git a/tools/testing/selftests/ftrace/test.d/remotes/trace_pipe.tc b/tools/testing/selftests/ftrace/test.d/remotes/trace_pipe.tc
new file mode 100644
index 000000000000..d28eaee10c7c
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/remotes/trace_pipe.tc
@@ -0,0 +1,127 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: Test trace remote consuming read
+# requires: remotes/test
+
+. $TEST_DIR/remotes/functions
+
+test_trace_pipe()
+{
+ echo 0 > tracing_on
+ assert_unloaded
+
+ # Emit events from the same CPU
+ for cpu in $(get_cpu_ids); do
+ break
+ done
+
+ #
+ # Simple test: Emit enough events to fill few pages
+ #
+
+ echo 1024 > buffer_size_kb
+ echo 1 > tracing_on
+ assert_loaded
+
+ events_per_page=$(($(get_page_size) / $(get_selftest_event_size)))
+ nr_events=$(($events_per_page * 4))
+
+ output=$(mktemp $TMPDIR/remote_test.XXXXXX)
+
+ cat trace_pipe > $output &
+ pid=$!
+
+ for i in $(seq 1 $nr_events); do
+ taskset -c $cpu echo $i > write_event
+ done
+
+ echo 0 > tracing_on
+ sleep 1
+ kill $pid
+
+ check_trace 1 $nr_events $output
+
+ rm $output
+
+ #
+ # Test interaction with lost events
+ #
+
+ assert_unloaded
+ echo 7 > buffer_size_kb
+ echo 1 > tracing_on
+ assert_loaded
+
+ nr_events=$((events_per_page * 2))
+ for i in $(seq 1 $nr_events); do
+ taskset -c $cpu echo $i > write_event
+ done
+
+ output=$(dump_trace_pipe)
+
+ lost_events=$(sed -n -e '1s/CPU:.*\[LOST \([0-9]*\) EVENTS\]/\1/p' $output)
+ test -n "$lost_events"
+
+ id=$(sed -n -e '2s/\[[0-9]*\]\s*[0-9]*.[0-9]*: [a-z]* id=\([0-9]*\)/\1/p' $output)
+ test "$id" -eq $(($lost_events + 1))
+
+ # Drop [LOST EVENTS] line
+ sed -i '1d' $output
+
+ check_trace $id $nr_events $output
+
+ rm $output
+
+ #
+ # Test per-CPU interface
+ #
+
+ echo 0 > trace
+ echo 1 > tracing_on
+
+ for cpu in $(get_cpu_ids); do
+ taskset -c $cpu echo $cpu > write_event
+ done
+
+ for cpu in $(get_cpu_ids); do
+ cd per_cpu/cpu$cpu/
+ output=$(dump_trace_pipe)
+
+ check_trace $cpu $cpu $output
+
+ rm $output
+ cd - > /dev/null
+ done
+
+ #
+ # Test interaction with hotplug
+ #
+
+ [ "$(get_cpu_ids | wc -l)" -ge 2 ] || return 0
+
+ echo 0 > trace
+
+ for cpu in $(get_cpu_ids); do
+ echo 0 > /sys/devices/system/cpu/cpu$cpu/online
+ break
+ done
+
+ for i in $(seq 1 8); do
+ echo $i > write_event
+ done
+
+ output=$(dump_trace_pipe)
+
+ check_trace 1 8 $output
+
+ rm $output
+
+ echo 1 > /sys/devices/system/cpu/cpu$cpu/online
+}
+
+if [ -z "$SOURCE_REMOTE_TEST" ]; then
+ set -e
+
+ setup_remote_test
+ test_trace_pipe
+fi
diff --git a/tools/testing/selftests/ftrace/test.d/remotes/unloading.tc b/tools/testing/selftests/ftrace/test.d/remotes/unloading.tc
new file mode 100644
index 000000000000..cac2190183f6
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/remotes/unloading.tc
@@ -0,0 +1,41 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: Test trace remote unloading
+# requires: remotes/test
+
+. $TEST_DIR/remotes/functions
+
+test_unloading()
+{
+ # No reader, writing
+ assert_loaded
+
+ # No reader, no writing
+ echo 0 > tracing_on
+ assert_unloaded
+
+ # 1 reader, no writing
+ cat trace_pipe &
+ pid=$!
+ sleep 1
+ assert_loaded
+ kill $pid
+ assert_unloaded
+
+ # No reader, no writing, events
+ echo 1 > tracing_on
+ echo 1 > write_event
+ echo 0 > tracing_on
+ assert_loaded
+
+ # Test reset
+ clear_trace
+ assert_unloaded
+}
+
+if [ -z "$SOURCE_REMOTE_TEST" ]; then
+ set -e
+
+ setup_remote_test
+ test_unloading
+fi
--
2.51.0.618.g983fd99d29-goog
^ permalink raw reply related [flat|nested] 33+ messages in thread
* [PATCH v7 16/28] Documentation: tracing: Add tracing remotes
2025-10-03 13:37 [PATCH v7 00/28] Tracefs support for pKVM Vincent Donnefort
` (14 preceding siblings ...)
2025-10-03 13:38 ` [PATCH v7 15/28] tracing: selftests: Add trace remote tests Vincent Donnefort
@ 2025-10-03 13:38 ` Vincent Donnefort
2025-10-03 13:38 ` [PATCH v7 17/28] tracing: load/unload page callbacks for simple_ring_buffer Vincent Donnefort
` (11 subsequent siblings)
27 siblings, 0 replies; 33+ messages in thread
From: Vincent Donnefort @ 2025-10-03 13:38 UTC (permalink / raw)
To: rostedt, mhiramat, mathieu.desnoyers, linux-trace-kernel, maz,
oliver.upton, joey.gouly, suzuki.poulose, yuzenghui
Cc: kvmarm, linux-arm-kernel, jstultz, qperret, will, aneesh.kumar,
kernel-team, linux-kernel, Vincent Donnefort
Add documentation about the newly introduced tracing remotes framework.
Signed-off-by: Vincent Donnefort <vdonnefort@google.com>
diff --git a/Documentation/trace/index.rst b/Documentation/trace/index.rst
index b4a429dc4f7a..d77ffb7e2d08 100644
--- a/Documentation/trace/index.rst
+++ b/Documentation/trace/index.rst
@@ -90,6 +90,17 @@ interactions.
user_events
uprobetracer
+Remote Tracing
+--------------
+
+This section covers the framework to read compatible ring-buffers, written by
+entities outside of the kernel (most likely firmware or a hypervisor).
+
+.. toctree::
+ :maxdepth: 1
+
+ remotes
+
Additional Resources
--------------------
diff --git a/Documentation/trace/remotes.rst b/Documentation/trace/remotes.rst
new file mode 100644
index 000000000000..e7fb3ee96c30
--- /dev/null
+++ b/Documentation/trace/remotes.rst
@@ -0,0 +1,59 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===============
+Tracing Remotes
+===============
+
+:Author: Vincent Donnefort <vdonnefort@google.com>
+
+Overview
+========
+A trace remote relies on ring-buffer remotes to read and control compatible
+tracing buffers, written by an entity such as firmware or a hypervisor.
+
+Once registered, a tracefs instance will appear for this remote in the Tracefs
+directory **remotes/**. This remote can be read and controlled using the same
+files as regular Tracefs instances such as **trace_pipe**, **tracing_on** or
+**trace**.
+
+Register a remote
+=================
+A remote must provide a set of callbacks `struct trace_remote_callbacks`, whose
+description can be found below. Those callbacks allow Tracefs to enable and
+disable tracing and events, to load and unload a tracing buffer (a set of
+ring-buffers) and to swap a reader page with the head page, which enables
+consuming reads.
+
+.. kernel-doc:: include/linux/trace_remote.h
+
+Declare a remote event
+======================
+Macros are provided to ease the declaration of remote events, in a similar
+fashion to in-kernel events. A declaration must provide an ID, a description of
+the event arguments and how to print the event:
+
+.. code-block:: c
+
+ REMOTE_EVENT(foo, EVENT_FOO_ID,
+ RE_STRUCT(
+ re_field(u64, bar)
+ ),
+ RE_PRINTK("bar=%lld", __entry->bar)
+ );
+
+Then those events must be declared in a C file with the following:
+
+.. code-block:: c
+
+ #define REMOTE_EVENT_INCLUDE_FILE foo_events.h
+ #include <trace/define_remote_events.h>
+
+This will provide a `struct remote_event remote_event_foo` that can be given to
+`trace_remote_register`.
+
+Simple ring-buffer
+==================
+A simple implementation for a ring-buffer writer can be found in
+kernel/trace/simple_ring_buffer.c.
+
+.. kernel-doc:: include/linux/simple_ring_buffer.h
--
2.51.0.618.g983fd99d29-goog
^ permalink raw reply related [flat|nested] 33+ messages in thread
* [PATCH v7 17/28] tracing: load/unload page callbacks for simple_ring_buffer
2025-10-03 13:37 [PATCH v7 00/28] Tracefs support for pKVM Vincent Donnefort
` (15 preceding siblings ...)
2025-10-03 13:38 ` [PATCH v7 16/28] Documentation: tracing: Add tracing remotes Vincent Donnefort
@ 2025-10-03 13:38 ` Vincent Donnefort
2025-10-03 13:38 ` [PATCH v7 18/28] tracing: Check for undefined symbols in simple_ring_buffer Vincent Donnefort
` (10 subsequent siblings)
27 siblings, 0 replies; 33+ messages in thread
From: Vincent Donnefort @ 2025-10-03 13:38 UTC (permalink / raw)
To: rostedt, mhiramat, mathieu.desnoyers, linux-trace-kernel, maz,
oliver.upton, joey.gouly, suzuki.poulose, yuzenghui
Cc: kvmarm, linux-arm-kernel, jstultz, qperret, will, aneesh.kumar,
kernel-team, linux-kernel, Vincent Donnefort
Add load/unload callbacks used for each admitted page in the ring-buffer.
This will later be useful for the pKVM hypervisor, which uses a different
VA space and needs to dynamically map/unmap the ring-buffer pages.
Signed-off-by: Vincent Donnefort <vdonnefort@google.com>
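As a sketch of what this enables, a writer living in its own VA space
can plug its mapping primitives into the ring-buffer setup/teardown as
below. my_map_page() and my_unmap_page() are hypothetical placeholders;
the actual pKVM user comes later in the series.

/* Hypothetical hooks for a writer with its own VA space. */
static void *my_load_page(unsigned long kern_va)
{
	/* Map (and pin) the page, return the local VA, or NULL */
	return my_map_page(kern_va);
}

static void my_unload_page(void *va)
{
	my_unmap_page(va);
}

static int my_rb_init(struct simple_rb_per_cpu *cpu_buffer,
		      struct simple_buffer_page *bpages,
		      const struct ring_buffer_desc *desc)
{
	return __simple_ring_buffer_init(cpu_buffer, bpages, desc,
					 my_load_page, my_unload_page);
}

static void my_rb_unload(struct simple_rb_per_cpu *cpu_buffer)
{
	__simple_ring_buffer_unload(cpu_buffer, my_unload_page);
}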
diff --git a/include/linux/simple_ring_buffer.h b/include/linux/simple_ring_buffer.h
index f324df2f875b..ecd0e988c699 100644
--- a/include/linux/simple_ring_buffer.h
+++ b/include/linux/simple_ring_buffer.h
@@ -110,4 +110,11 @@ int simple_ring_buffer_reset(struct simple_rb_per_cpu *cpu_buffer);
*/
int simple_ring_buffer_swap_reader_page(struct simple_rb_per_cpu *cpu_buffer);
+int __simple_ring_buffer_init(struct simple_rb_per_cpu *cpu_buffer,
+ struct simple_buffer_page *bpages,
+ const struct ring_buffer_desc *desc,
+ void *(*load_page)(unsigned long va),
+ void (*unload_page)(void *va));
+void __simple_ring_buffer_unload(struct simple_rb_per_cpu *cpu_buffer,
+ void (*unload_page)(void *));
#endif
diff --git a/kernel/trace/simple_ring_buffer.c b/kernel/trace/simple_ring_buffer.c
index 20e3cd6071a2..c2ec6017c37c 100644
--- a/kernel/trace/simple_ring_buffer.c
+++ b/kernel/trace/simple_ring_buffer.c
@@ -71,7 +71,7 @@ static void simple_bpage_reset(struct simple_buffer_page *bpage)
local_set(&bpage->page->commit, 0);
}
-static void simple_bpage_init(struct simple_buffer_page *bpage, unsigned long page)
+static void simple_bpage_init(struct simple_buffer_page *bpage, void *page)
{
INIT_LIST_HEAD(&bpage->link);
bpage->page = (struct buffer_data_page *)page;
@@ -338,10 +338,15 @@ int simple_ring_buffer_reset(struct simple_rb_per_cpu *cpu_buffer)
return 0;
}
-int simple_ring_buffer_init(struct simple_rb_per_cpu *cpu_buffer, struct simple_buffer_page *bpages,
- const struct ring_buffer_desc *desc)
+int __simple_ring_buffer_init(struct simple_rb_per_cpu *cpu_buffer,
+ struct simple_buffer_page *bpages,
+ const struct ring_buffer_desc *desc,
+ void *(*load_page)(unsigned long va),
+ void (*unload_page)(void *va))
{
struct simple_buffer_page *bpage = bpages;
+ int ret = 0;
+ void *page;
int i;
/* At least 1 reader page and two pages in the ring-buffer */
@@ -350,15 +355,22 @@ int simple_ring_buffer_init(struct simple_rb_per_cpu *cpu_buffer, struct simple_
memset(cpu_buffer, 0, sizeof(*cpu_buffer));
- cpu_buffer->bpages = bpages;
+ cpu_buffer->meta = load_page(desc->meta_va);
+ if (!cpu_buffer->meta)
+ return -EINVAL;
- cpu_buffer->meta = (void *)desc->meta_va;
memset(cpu_buffer->meta, 0, sizeof(*cpu_buffer->meta));
cpu_buffer->meta->meta_page_size = PAGE_SIZE;
cpu_buffer->meta->nr_subbufs = cpu_buffer->nr_pages;
/* The reader page is not part of the ring initially */
- simple_bpage_init(bpage, desc->page_va[0]);
+ page = load_page(desc->page_va[0]);
+ if (!page) {
+ unload_page(cpu_buffer->meta);
+ return -EINVAL;
+ }
+
+ simple_bpage_init(bpage, page);
bpage->id = 0;
cpu_buffer->nr_pages = 1;
@@ -368,7 +380,13 @@ int simple_ring_buffer_init(struct simple_rb_per_cpu *cpu_buffer, struct simple_
cpu_buffer->head_page = bpage + 1;
for (i = 1; i < desc->nr_page_va; i++) {
- simple_bpage_init(++bpage, desc->page_va[i]);
+ page = load_page(desc->page_va[i]);
+ if (!page) {
+ ret = -EINVAL;
+ break;
+ }
+
+ simple_bpage_init(++bpage, page);
bpage->link.next = &(bpage + 1)->link;
bpage->link.prev = &(bpage - 1)->link;
@@ -377,6 +395,14 @@ int simple_ring_buffer_init(struct simple_rb_per_cpu *cpu_buffer, struct simple_
cpu_buffer->nr_pages = i + 1;
}
+ if (ret) {
+ for (i--; i >= 0; i--)
+ unload_page((void *)desc->page_va[i]);
+ unload_page(cpu_buffer->meta);
+
+ return ret;
+ }
+
/* Close the ring */
bpage->link.next = &cpu_buffer->tail_page->link;
cpu_buffer->tail_page->link.prev = &bpage->link;
@@ -384,19 +410,46 @@ int simple_ring_buffer_init(struct simple_rb_per_cpu *cpu_buffer, struct simple_
/* The last init'ed page points to the head page */
simple_bpage_set_head_link(bpage);
+ cpu_buffer->bpages = bpages;
+
return 0;
}
-void simple_ring_buffer_unload(struct simple_rb_per_cpu *cpu_buffer)
+static void *__load_page(unsigned long page)
{
+ return (void *)page;
+}
+
+static void __unload_page(void *page) { }
+
+int simple_ring_buffer_init(struct simple_rb_per_cpu *cpu_buffer, struct simple_buffer_page *bpages,
+ const struct ring_buffer_desc *desc)
+{
+ return __simple_ring_buffer_init(cpu_buffer, bpages, desc, __load_page, __unload_page);
+}
+
+void __simple_ring_buffer_unload(struct simple_rb_per_cpu *cpu_buffer,
+ void (*unload_page)(void *))
+{
+ int p;
+
if (!simple_rb_loaded(cpu_buffer))
return;
simple_rb_enable_tracing(cpu_buffer, false);
+ unload_page(cpu_buffer->meta);
+ for (p = 0; p < cpu_buffer->nr_pages; p++)
+ unload_page(cpu_buffer->bpages[p].page);
+
cpu_buffer->bpages = NULL;
}
+void simple_ring_buffer_unload(struct simple_rb_per_cpu *cpu_buffer)
+{
+ return __simple_ring_buffer_unload(cpu_buffer, __unload_page);
+}
+
int simple_ring_buffer_enable_tracing(struct simple_rb_per_cpu *cpu_buffer, bool enable)
{
if (!simple_rb_loaded(cpu_buffer))
--
2.51.0.618.g983fd99d29-goog
^ permalink raw reply related [flat|nested] 33+ messages in thread
* [PATCH v7 18/28] tracing: Check for undefined symbols in simple_ring_buffer
2025-10-03 13:37 [PATCH v7 00/28] Tracefs support for pKVM Vincent Donnefort
` (16 preceding siblings ...)
2025-10-03 13:38 ` [PATCH v7 17/28] tracing: load/unload page callbacks for simple_ring_buffer Vincent Donnefort
@ 2025-10-03 13:38 ` Vincent Donnefort
2025-10-03 13:38 ` [PATCH v7 19/28] KVM: arm64: Support unaligned fixmap in the pKVM hyp Vincent Donnefort
` (9 subsequent siblings)
27 siblings, 0 replies; 33+ messages in thread
From: Vincent Donnefort @ 2025-10-03 13:38 UTC (permalink / raw)
To: rostedt, mhiramat, mathieu.desnoyers, linux-trace-kernel, maz,
oliver.upton, joey.gouly, suzuki.poulose, yuzenghui
Cc: kvmarm, linux-arm-kernel, jstultz, qperret, will, aneesh.kumar,
kernel-team, linux-kernel, Vincent Donnefort
The simple_ring_buffer implementation must remain simple enough to be
used by the pKVM hypervisor. Fail the object build if unresolved
symbols outside of an allowlist are found.
Signed-off-by: Vincent Donnefort <vdonnefort@google.com>
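Concretely, the rule added below runs $(NM) -u on the object and fails
if any undefined symbol outside the allowlist shows up. The same check
can be reproduced by hand with nm -u kernel/trace/simple_ring_buffer.o.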
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 53534447e70b..38523ea2e19b 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -114,4 +114,20 @@ obj-$(CONFIG_TRACE_REMOTE) += trace_remote.o
obj-$(CONFIG_SIMPLE_RING_BUFFER) += simple_ring_buffer.o
obj-$(CONFIG_TRACE_REMOTE_TEST) += remote_test.o
+#
+# simple_ring_buffer is used by the pKVM hypervisor which does not have access
+# to all kernel symbols. Fail the build if forbidden symbols are found.
+#
+UNDEFINED_ALLOWLIST := memset alt_cb_patch_nops __x86 __ubsan __asan __kasan __gcov __aeabi_unwind
+UNDEFINED_ALLOWLIST += __stack_chk_fail stackleak_track_stack __ref_stack __sanitizer
+UNDEFINED_ALLOWLIST := $(addprefix -e , $(UNDEFINED_ALLOWLIST))
+
+quiet_cmd_check_undefined = NM $<
+ cmd_check_undefined = test -z "`$(NM) -u $< | grep -v $(UNDEFINED_ALLOWLIST)`"
+
+$(obj)/%.o.checked: $(obj)/%.o FORCE
+ $(call if_changed,check_undefined)
+
+always-$(CONFIG_SIMPLE_RING_BUFFER) += simple_ring_buffer.o.checked
+
libftrace-y := ftrace.o
--
2.51.0.618.g983fd99d29-goog
^ permalink raw reply related [flat|nested] 33+ messages in thread
* [PATCH v7 19/28] KVM: arm64: Support unaligned fixmap in the pKVM hyp
2025-10-03 13:37 [PATCH v7 00/28] Tracefs support for pKVM Vincent Donnefort
` (17 preceding siblings ...)
2025-10-03 13:38 ` [PATCH v7 18/28] tracing: Check for undefined symbols in simple_ring_buffer Vincent Donnefort
@ 2025-10-03 13:38 ` Vincent Donnefort
2025-10-03 13:38 ` [PATCH v7 20/28] KVM: arm64: Add clock support for " Vincent Donnefort
` (8 subsequent siblings)
27 siblings, 0 replies; 33+ messages in thread
From: Vincent Donnefort @ 2025-10-03 13:38 UTC (permalink / raw)
To: rostedt, mhiramat, mathieu.desnoyers, linux-trace-kernel, maz,
oliver.upton, joey.gouly, suzuki.poulose, yuzenghui
Cc: kvmarm, linux-arm-kernel, jstultz, qperret, will, aneesh.kumar,
kernel-team, linux-kernel, Vincent Donnefort
Return the fixmap VA with the page offset, instead of the page base
address. This allows hyp_fixmap_map() to be used seamlessly regardless
of the address alignment.
Signed-off-by: Vincent Donnefort <vdonnefort@google.com>
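For example, with 4K pages and a purely illustrative phys value,
hyp_fixmap_map(0x40001234) now returns slot->addr + 0x234 instead of
the page-aligned slot->addr, so the caller can dereference the result
directly whatever the sub-page offset.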
diff --git a/arch/arm64/kvm/hyp/nvhe/mm.c b/arch/arm64/kvm/hyp/nvhe/mm.c
index ae8391baebc3..75014dc7d82e 100644
--- a/arch/arm64/kvm/hyp/nvhe/mm.c
+++ b/arch/arm64/kvm/hyp/nvhe/mm.c
@@ -239,7 +239,7 @@ static void *fixmap_map_slot(struct hyp_fixmap_slot *slot, phys_addr_t phys)
WRITE_ONCE(*ptep, pte);
dsb(ishst);
- return (void *)slot->addr;
+ return (void *)slot->addr + offset_in_page(phys);
}
void *hyp_fixmap_map(phys_addr_t phys)
--
2.51.0.618.g983fd99d29-goog
^ permalink raw reply related [flat|nested] 33+ messages in thread
* [PATCH v7 20/28] KVM: arm64: Add clock support for the pKVM hyp
2025-10-03 13:37 [PATCH v7 00/28] Tracefs support for pKVM Vincent Donnefort
` (18 preceding siblings ...)
2025-10-03 13:38 ` [PATCH v7 19/28] KVM: arm64: Support unaligned fixmap in the pKVM hyp Vincent Donnefort
@ 2025-10-03 13:38 ` Vincent Donnefort
2025-10-03 13:38 ` [PATCH v7 21/28] KVM: arm64: Add tracing capability " Vincent Donnefort
` (7 subsequent siblings)
27 siblings, 0 replies; 33+ messages in thread
From: Vincent Donnefort @ 2025-10-03 13:38 UTC (permalink / raw)
To: rostedt, mhiramat, mathieu.desnoyers, linux-trace-kernel, maz,
oliver.upton, joey.gouly, suzuki.poulose, yuzenghui
Cc: kvmarm, linux-arm-kernel, jstultz, qperret, will, aneesh.kumar,
kernel-team, linux-kernel, Vincent Donnefort
By default, the arm64 host kernel uses the arch timer as the source for
sched_clock. Conveniently, EL2 has access to that same counter, allowing
it to generate clock values that are synchronized with the kernel.
The clock nonetheless needs to be set up with the same slope values as
the kernel's. Introduce at the same time trace_clock(), which is
expected to be configured later by the hypervisor tracing.
Signed-off-by: Vincent Donnefort <vdonnefort@google.com>
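As a worked example (numbers purely illustrative): for a 24 MHz counter,
one valid pair is mult = 2796202667 and shift = 26, since
2796202667 / 2^26 is ~41.667 ns per cycle; 24,000,000 cycles then
convert to (24000000 * 2796202667) >> 26, which is ~1e9 ns, i.e. one
second. With such a mult, the 64-bit fast path overflows past
ULONG_MAX / mult cycles (~275 seconds of counter delta here), hence the
__uint128_t fallback in the code below.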
diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
index e6be1f5d0967..d46621d936e3 100644
--- a/arch/arm64/include/asm/kvm_hyp.h
+++ b/arch/arm64/include/asm/kvm_hyp.h
@@ -146,5 +146,4 @@ extern u64 kvm_nvhe_sym(id_aa64smfr0_el1_sys_val);
extern unsigned long kvm_nvhe_sym(__icache_flags);
extern unsigned int kvm_nvhe_sym(kvm_arm_vmid_bits);
extern unsigned int kvm_nvhe_sym(kvm_host_sve_max_vl);
-
#endif /* __ARM64_KVM_HYP_H__ */
diff --git a/arch/arm64/kvm/hyp/include/nvhe/clock.h b/arch/arm64/kvm/hyp/include/nvhe/clock.h
new file mode 100644
index 000000000000..9e152521f345
--- /dev/null
+++ b/arch/arm64/kvm/hyp/include/nvhe/clock.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ARM64_KVM_HYP_NVHE_CLOCK_H
+#define __ARM64_KVM_HYP_NVHE_CLOCK_H
+#include <linux/types.h>
+
+#include <asm/kvm_hyp.h>
+
+#ifdef CONFIG_PKVM_TRACING
+void trace_clock_update(u32 mult, u32 shift, u64 epoch_ns, u64 epoch_cyc);
+u64 trace_clock(void);
+#else
+static inline void
+trace_clock_update(u32 mult, u32 shift, u64 epoch_ns, u64 epoch_cyc) { }
+static inline u64 trace_clock(void) { return 0; }
+#endif
+#endif
diff --git a/arch/arm64/kvm/hyp/nvhe/Makefile b/arch/arm64/kvm/hyp/nvhe/Makefile
index 0b0a68b663d4..607357e36026 100644
--- a/arch/arm64/kvm/hyp/nvhe/Makefile
+++ b/arch/arm64/kvm/hyp/nvhe/Makefile
@@ -17,7 +17,7 @@ ccflags-y += -fno-stack-protector \
hostprogs := gen-hyprel
HOST_EXTRACFLAGS += -I$(objtree)/include
-lib-objs := clear_page.o copy_page.o memcpy.o memset.o
+lib-objs := clear_page.o copy_page.o memcpy.o memset.o tishift.o
lib-objs := $(addprefix ../../../lib/, $(lib-objs))
CFLAGS_switch.nvhe.o += -Wno-override-init
@@ -28,6 +28,7 @@ hyp-obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o hyp-init.o host.o
hyp-obj-y += ../vgic-v3-sr.o ../aarch32.o ../vgic-v2-cpuif-proxy.o ../entry.o \
../fpsimd.o ../hyp-entry.o ../exception.o ../pgtable.o
hyp-obj-$(CONFIG_LIST_HARDENED) += list_debug.o
+hyp-obj-$(CONFIG_PKVM_TRACING) += clock.o
hyp-obj-y += $(lib-objs)
##
diff --git a/arch/arm64/kvm/hyp/nvhe/clock.c b/arch/arm64/kvm/hyp/nvhe/clock.c
new file mode 100644
index 000000000000..600a300bece7
--- /dev/null
+++ b/arch/arm64/kvm/hyp/nvhe/clock.c
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2025 Google LLC
+ * Author: Vincent Donnefort <vdonnefort@google.com>
+ */
+
+#include <nvhe/clock.h>
+
+#include <asm/arch_timer.h>
+#include <asm/div64.h>
+
+static struct clock_data {
+ struct {
+ u32 mult;
+ u32 shift;
+ u64 epoch_ns;
+ u64 epoch_cyc;
+ u64 cyc_overflow64;
+ } data[2];
+ u64 cur;
+} trace_clock_data;
+
+static u64 __clock_mult_uint128(u64 cyc, u32 mult, u32 shift)
+{
+ __uint128_t ns = (__uint128_t)cyc * mult;
+
+ ns >>= shift;
+
+ return (u64)ns;
+}
+
+/* Does not guarantee no reader on the modified bank. */
+void trace_clock_update(u32 mult, u32 shift, u64 epoch_ns, u64 epoch_cyc)
+{
+ struct clock_data *clock = &trace_clock_data;
+ u64 bank = clock->cur ^ 1;
+
+ clock->data[bank].mult = mult;
+ clock->data[bank].shift = shift;
+ clock->data[bank].epoch_ns = epoch_ns;
+ clock->data[bank].epoch_cyc = epoch_cyc;
+ clock->data[bank].cyc_overflow64 = ULONG_MAX / mult;
+
+ smp_store_release(&clock->cur, bank);
+}
+
+/* Using host-provided data. Do not use for anything other than debugging. */
+u64 trace_clock(void)
+{
+ struct clock_data *clock = &trace_clock_data;
+ u64 bank = smp_load_acquire(&clock->cur);
+ u64 cyc, ns;
+
+ cyc = __arch_counter_get_cntpct() - clock->data[bank].epoch_cyc;
+
+ if (likely(cyc < clock->data[bank].cyc_overflow64)) {
+ ns = cyc * clock->data[bank].mult;
+ ns >>= clock->data[bank].shift;
+ } else {
+ ns = __clock_mult_uint128(cyc, clock->data[bank].mult,
+ clock->data[bank].shift);
+ }
+
+ return (u64)ns + clock->data[bank].epoch_ns;
+}
--
2.51.0.618.g983fd99d29-goog
^ permalink raw reply related [flat|nested] 33+ messages in thread
* [PATCH v7 21/28] KVM: arm64: Add tracing capability for the pKVM hyp
2025-10-03 13:37 [PATCH v7 00/28] Tracefs support for pKVM Vincent Donnefort
` (19 preceding siblings ...)
2025-10-03 13:38 ` [PATCH v7 20/28] KVM: arm64: Add clock support for " Vincent Donnefort
@ 2025-10-03 13:38 ` Vincent Donnefort
2025-10-03 13:38 ` [PATCH v7 22/28] KVM: arm64: Add trace remote " Vincent Donnefort
` (6 subsequent siblings)
27 siblings, 0 replies; 33+ messages in thread
From: Vincent Donnefort @ 2025-10-03 13:38 UTC (permalink / raw)
To: rostedt, mhiramat, mathieu.desnoyers, linux-trace-kernel, maz,
oliver.upton, joey.gouly, suzuki.poulose, yuzenghui
Cc: kvmarm, linux-arm-kernel, jstultz, qperret, will, aneesh.kumar,
kernel-team, linux-kernel, Vincent Donnefort
When running in protected mode, the host has very little knowledge
about what is happening in the hypervisor. Of course this is an
essential feature for security, but nonetheless, as that piece of code
grows with more responsibilities, we now need a way to debug and
profile it. Tracefs, with its reliability, versatility and support for
user-space, is the perfect tool.
There's no way the hypervisor could log events directly into the host
tracefs ring-buffers. So instead let's use our own ring-buffers, where
the hypervisor is the writer and the host the reader.
Signed-off-by: Vincent Donnefort <vdonnefort@google.com>
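For a flavour of the writer side, emitting an event from EL2 boils down
to the reserve/commit pair below. This is only a sketch: MY_EVT_ID and
struct my_evt are illustrative, the real event layouts being generated
by the REMOTE_EVENT() machinery.

#define MY_EVT_ID	1

struct my_evt {
	unsigned short	id;	/* event header, as consumed by the host */
	u64		val;
};

static void trace_my_evt(u64 val)
{
	struct my_evt *evt;

	evt = tracing_reserve_entry(sizeof(*evt));
	if (!evt)	/* no buffer loaded, tracing disabled or no space */
		return;

	evt->id = MY_EVT_ID;
	evt->val = val;

	tracing_commit_entry();
}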
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index bec227f9500a..437ac948d136 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -87,6 +87,10 @@ enum __kvm_host_smccc_func {
__KVM_HOST_SMCCC_FUNC___pkvm_vcpu_load,
__KVM_HOST_SMCCC_FUNC___pkvm_vcpu_put,
__KVM_HOST_SMCCC_FUNC___pkvm_tlb_flush_vmid,
+ __KVM_HOST_SMCCC_FUNC___pkvm_load_tracing,
+ __KVM_HOST_SMCCC_FUNC___pkvm_unload_tracing,
+ __KVM_HOST_SMCCC_FUNC___pkvm_enable_tracing,
+ __KVM_HOST_SMCCC_FUNC___pkvm_swap_reader_tracing,
};
#define DECLARE_KVM_VHE_SYM(sym) extern char sym[]
diff --git a/arch/arm64/include/asm/kvm_hyptrace.h b/arch/arm64/include/asm/kvm_hyptrace.h
new file mode 100644
index 000000000000..9c30a479bc36
--- /dev/null
+++ b/arch/arm64/include/asm/kvm_hyptrace.h
@@ -0,0 +1,13 @@
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __ARM64_KVM_HYPTRACE_H_
+#define __ARM64_KVM_HYPTRACE_H_
+
+#include <linux/ring_buffer.h>
+
+struct hyp_trace_desc {
+ unsigned long bpages_backing_start;
+ size_t bpages_backing_size;
+ struct trace_buffer_desc trace_buffer_desc;
+
+};
+#endif
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index 713248f240e0..06e948d066ac 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -82,4 +82,11 @@ config PTDUMP_STAGE2_DEBUGFS
If in doubt, say N.
+config PKVM_TRACING
+ bool
+ depends on KVM
+ depends on TRACING
+ select SIMPLE_RING_BUFFER
+ default y
+
endif # VIRTUALIZATION
diff --git a/arch/arm64/kvm/hyp/include/nvhe/trace.h b/arch/arm64/kvm/hyp/include/nvhe/trace.h
new file mode 100644
index 000000000000..996e90c0974f
--- /dev/null
+++ b/arch/arm64/kvm/hyp/include/nvhe/trace.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __ARM64_KVM_HYP_NVHE_TRACE_H
+#define __ARM64_KVM_HYP_NVHE_TRACE_H
+#include <asm/kvm_hyptrace.h>
+
+#ifdef CONFIG_PKVM_TRACING
+void *tracing_reserve_entry(unsigned long length);
+void tracing_commit_entry(void);
+
+int __pkvm_load_tracing(unsigned long desc_va, size_t desc_size);
+void __pkvm_unload_tracing(void);
+int __pkvm_enable_tracing(bool enable);
+int __pkvm_swap_reader_tracing(unsigned int cpu);
+#else
+static inline void *tracing_reserve_entry(unsigned long length) { return NULL; }
+static inline void tracing_commit_entry(void) { }
+
+static inline int __pkvm_load_tracing(unsigned long desc_va, size_t desc_size) { return -ENODEV; }
+static inline void __pkvm_unload_tracing(void) { }
+static inline int __pkvm_enable_tracing(bool enable) { return -ENODEV; }
+static inline int __pkvm_swap_reader_tracing(unsigned int cpu) { return -ENODEV; }
+#endif
+#endif
diff --git a/arch/arm64/kvm/hyp/nvhe/Makefile b/arch/arm64/kvm/hyp/nvhe/Makefile
index 607357e36026..e640f12808f7 100644
--- a/arch/arm64/kvm/hyp/nvhe/Makefile
+++ b/arch/arm64/kvm/hyp/nvhe/Makefile
@@ -28,7 +28,7 @@ hyp-obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o hyp-init.o host.o
hyp-obj-y += ../vgic-v3-sr.o ../aarch32.o ../vgic-v2-cpuif-proxy.o ../entry.o \
../fpsimd.o ../hyp-entry.o ../exception.o ../pgtable.o
hyp-obj-$(CONFIG_LIST_HARDENED) += list_debug.o
-hyp-obj-$(CONFIG_PKVM_TRACING) += clock.o
+hyp-obj-$(CONFIG_PKVM_TRACING) += clock.o trace.o ../../../../../kernel/trace/simple_ring_buffer.o
hyp-obj-y += $(lib-objs)
##
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index 3206b2c07f82..02b2fdd9a8e4 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -18,6 +18,7 @@
#include <nvhe/mem_protect.h>
#include <nvhe/mm.h>
#include <nvhe/pkvm.h>
+#include <nvhe/trace.h>
#include <nvhe/trap_handler.h>
DEFINE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
@@ -573,6 +574,35 @@ static void handle___pkvm_teardown_vm(struct kvm_cpu_context *host_ctxt)
cpu_reg(host_ctxt, 1) = __pkvm_teardown_vm(handle);
}
+static void handle___pkvm_load_tracing(struct kvm_cpu_context *host_ctxt)
+{
+ DECLARE_REG(unsigned long, desc_hva, host_ctxt, 1);
+ DECLARE_REG(size_t, desc_size, host_ctxt, 2);
+
+ cpu_reg(host_ctxt, 1) = __pkvm_load_tracing(desc_hva, desc_size);
+}
+
+static void handle___pkvm_unload_tracing(struct kvm_cpu_context *host_ctxt)
+{
+ __pkvm_unload_tracing();
+
+ cpu_reg(host_ctxt, 1) = 0;
+}
+
+static void handle___pkvm_enable_tracing(struct kvm_cpu_context *host_ctxt)
+{
+ DECLARE_REG(bool, enable, host_ctxt, 1);
+
+ cpu_reg(host_ctxt, 1) = __pkvm_enable_tracing(enable);
+}
+
+static void handle___pkvm_swap_reader_tracing(struct kvm_cpu_context *host_ctxt)
+{
+ DECLARE_REG(unsigned int, cpu, host_ctxt, 1);
+
+ cpu_reg(host_ctxt, 1) = __pkvm_swap_reader_tracing(cpu);
+}
+
typedef void (*hcall_t)(struct kvm_cpu_context *);
#define HANDLE_FUNC(x) [__KVM_HOST_SMCCC_FUNC_##x] = (hcall_t)handle_##x
@@ -612,6 +642,10 @@ static const hcall_t host_hcall[] = {
HANDLE_FUNC(__pkvm_vcpu_load),
HANDLE_FUNC(__pkvm_vcpu_put),
HANDLE_FUNC(__pkvm_tlb_flush_vmid),
+ HANDLE_FUNC(__pkvm_load_tracing),
+ HANDLE_FUNC(__pkvm_unload_tracing),
+ HANDLE_FUNC(__pkvm_enable_tracing),
+ HANDLE_FUNC(__pkvm_swap_reader_tracing),
};
static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
diff --git a/arch/arm64/kvm/hyp/nvhe/trace.c b/arch/arm64/kvm/hyp/nvhe/trace.c
new file mode 100644
index 000000000000..def5cbc75722
--- /dev/null
+++ b/arch/arm64/kvm/hyp/nvhe/trace.c
@@ -0,0 +1,257 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2025 Google LLC
+ * Author: Vincent Donnefort <vdonnefort@google.com>
+ */
+
+#include <nvhe/clock.h>
+#include <nvhe/mem_protect.h>
+#include <nvhe/mm.h>
+#include <nvhe/trace.h>
+
+#include <asm/percpu.h>
+#include <asm/kvm_mmu.h>
+#include <asm/local.h>
+
+#include <linux/simple_ring_buffer.h>
+
+static DEFINE_PER_CPU(struct simple_rb_per_cpu, __simple_rbs);
+
+static struct hyp_trace_buffer {
+ struct simple_rb_per_cpu __percpu *simple_rbs;
+ unsigned long bpages_backing_start;
+ size_t bpages_backing_size;
+ hyp_spinlock_t lock;
+} trace_buffer = {
+ .simple_rbs = &__simple_rbs,
+ .lock = __HYP_SPIN_LOCK_UNLOCKED,
+};
+
+static bool hyp_trace_buffer_loaded(struct hyp_trace_buffer *trace_buffer)
+{
+ return trace_buffer->bpages_backing_size > 0;
+}
+
+void *tracing_reserve_entry(unsigned long length)
+{
+ return simple_ring_buffer_reserve(this_cpu_ptr(trace_buffer.simple_rbs), length,
+ trace_clock());
+}
+
+void tracing_commit_entry(void)
+{
+ simple_ring_buffer_commit(this_cpu_ptr(trace_buffer.simple_rbs));
+}
+
+static int hyp_trace_buffer_load_bpage_backing(struct hyp_trace_buffer *trace_buffer,
+ struct hyp_trace_desc *desc)
+{
+ unsigned long start = kern_hyp_va(desc->bpages_backing_start);
+ size_t size = desc->bpages_backing_size;
+ int ret;
+
+ if (!PAGE_ALIGNED(start) || !PAGE_ALIGNED(size))
+ return -EINVAL;
+
+ ret = __pkvm_host_donate_hyp(hyp_virt_to_pfn((void *)start), size >> PAGE_SHIFT);
+ if (ret)
+ return ret;
+
+ memset((void *)start, 0, size);
+
+ trace_buffer->bpages_backing_start = start;
+ trace_buffer->bpages_backing_size = size;
+
+ return 0;
+}
+
+static void hyp_trace_buffer_unload_bpage_backing(struct hyp_trace_buffer *trace_buffer)
+{
+ unsigned long start = trace_buffer->bpages_backing_start;
+ size_t size = trace_buffer->bpages_backing_size;
+
+ if (!size)
+ return;
+
+ memset((void *)start, 0, size);
+
+ WARN_ON(__pkvm_hyp_donate_host(hyp_virt_to_pfn(start), size >> PAGE_SHIFT));
+
+ trace_buffer->bpages_backing_start = 0;
+ trace_buffer->bpages_backing_size = 0;
+}
+
+static void *__pin_shared_page(unsigned long kern_va)
+{
+ void *va = kern_hyp_va((void *)kern_va);
+
+ return hyp_pin_shared_mem(va, va + PAGE_SIZE) ? NULL : va;
+}
+
+static void __unpin_shared_page(void *va)
+{
+ hyp_unpin_shared_mem(va, va + PAGE_SIZE);
+}
+
+static void hyp_trace_buffer_unload(struct hyp_trace_buffer *trace_buffer)
+{
+ int cpu;
+
+ hyp_assert_lock_held(&trace_buffer->lock);
+
+ if (!hyp_trace_buffer_loaded(trace_buffer))
+ return;
+
+ for (cpu = 0; cpu < hyp_nr_cpus; cpu++)
+ __simple_ring_buffer_unload(per_cpu_ptr(trace_buffer->simple_rbs, cpu),
+ __unpin_shared_page);
+
+ hyp_trace_buffer_unload_bpage_backing(trace_buffer);
+}
+
+static int hyp_trace_buffer_load(struct hyp_trace_buffer *trace_buffer,
+ struct hyp_trace_desc *desc)
+{
+ struct simple_buffer_page *bpages;
+ struct ring_buffer_desc *rb_desc;
+ int ret, cpu;
+
+ hyp_assert_lock_held(&trace_buffer->lock);
+
+ if (hyp_trace_buffer_loaded(trace_buffer))
+ return -EINVAL;
+
+ ret = hyp_trace_buffer_load_bpage_backing(trace_buffer, desc);
+ if (ret)
+ return ret;
+
+ bpages = (struct simple_buffer_page *)trace_buffer->bpages_backing_start;
+ for_each_ring_buffer_desc(rb_desc, cpu, &desc->trace_buffer_desc) {
+ ret = __simple_ring_buffer_init(per_cpu_ptr(trace_buffer->simple_rbs, cpu),
+ bpages, rb_desc, __pin_shared_page,
+ __unpin_shared_page);
+ if (ret)
+ break;
+
+ bpages += rb_desc->nr_page_va;
+ }
+
+ if (ret)
+ hyp_trace_buffer_unload(trace_buffer);
+
+ return ret;
+}
+
+static bool hyp_trace_desc_validate(struct hyp_trace_desc *desc, size_t desc_size)
+{
+ struct simple_buffer_page *bpages = (struct simple_buffer_page *)desc->bpages_backing_start;
+ struct ring_buffer_desc *rb_desc;
+ void *bpages_end, *desc_end;
+ unsigned int cpu;
+
+ desc_end = (void *)desc + desc_size; /* __pkvm_host_donate_hyp validates desc_size */
+
+ bpages_end = (void *)desc->bpages_backing_start + desc->bpages_backing_size;
+ if (bpages_end < (void *)desc->bpages_backing_start)
+ return false;
+
+ for_each_ring_buffer_desc(rb_desc, cpu, &desc->trace_buffer_desc) {
+ /* Can we read nr_page_va? */
+ if ((void *)rb_desc + struct_size(rb_desc, page_va, 0) > desc_end)
+ return false;
+
+ /* Overflow desc? */
+ if ((void *)rb_desc + struct_size(rb_desc, page_va, rb_desc->nr_page_va) > desc_end)
+ return false;
+
+ /* Overflow bpages backing memory? */
+ if ((void *)(bpages + rb_desc->nr_page_va) > bpages_end)
+ return false;
+
+ if (cpu >= hyp_nr_cpus)
+ return false;
+
+ if (cpu != rb_desc->cpu)
+ return false;
+
+ bpages += rb_desc->nr_page_va;
+ }
+
+ return true;
+}
+
+int __pkvm_load_tracing(unsigned long desc_hva, size_t desc_size)
+{
+ struct hyp_trace_desc *desc = (struct hyp_trace_desc *)kern_hyp_va(desc_hva);
+ int ret;
+
+ if (!desc_size || !PAGE_ALIGNED(desc_hva) || !PAGE_ALIGNED(desc_size))
+ return -EINVAL;
+
+ ret = __pkvm_host_donate_hyp(hyp_virt_to_pfn((void *)desc),
+ desc_size >> PAGE_SHIFT);
+ if (ret)
+ return ret;
+ ret = -EINVAL;
+ if (!hyp_trace_desc_validate(desc, desc_size))
+ goto err_donate_desc;
+
+ hyp_spin_lock(&trace_buffer.lock);
+
+ ret = hyp_trace_buffer_load(&trace_buffer, desc);
+
+ hyp_spin_unlock(&trace_buffer.lock);
+
+err_donate_desc:
+ WARN_ON(__pkvm_hyp_donate_host(hyp_virt_to_pfn((void *)desc),
+ desc_size >> PAGE_SHIFT));
+ return ret;
+}
+
+void __pkvm_unload_tracing(void)
+{
+ hyp_spin_lock(&trace_buffer.lock);
+ hyp_trace_buffer_unload(&trace_buffer);
+ hyp_spin_unlock(&trace_buffer.lock);
+}
+
+int __pkvm_enable_tracing(bool enable)
+{
+ int cpu, ret = enable ? -EINVAL : 0;
+
+ hyp_spin_lock(&trace_buffer.lock);
+
+ if (!hyp_trace_buffer_loaded(&trace_buffer))
+ goto unlock;
+
+ for (cpu = 0; cpu < hyp_nr_cpus; cpu++)
+ simple_ring_buffer_enable_tracing(per_cpu_ptr(trace_buffer.simple_rbs, cpu),
+ enable);
+
+ ret = 0;
+
+unlock:
+ hyp_spin_unlock(&trace_buffer.lock);
+
+ return ret;
+}
+
+int __pkvm_swap_reader_tracing(unsigned int cpu)
+{
+ int ret;
+
+ if (cpu >= hyp_nr_cpus)
+ return -EINVAL;
+
+ hyp_spin_lock(&trace_buffer.lock);
+
+ if (hyp_trace_buffer_loaded(&trace_buffer))
+ ret = simple_ring_buffer_swap_reader_page(
+ per_cpu_ptr(trace_buffer.simple_rbs, cpu));
+ else
+ ret = -ENODEV;
+
+ hyp_spin_unlock(&trace_buffer.lock);
+
+ return ret;
+}
--
2.51.0.618.g983fd99d29-goog
^ permalink raw reply related [flat|nested] 33+ messages in thread* [PATCH v7 22/28] KVM: arm64: Add trace remote for the pKVM hyp
2025-10-03 13:37 [PATCH v7 00/28] Tracefs support for pKVM Vincent Donnefort
` (20 preceding siblings ...)
2025-10-03 13:38 ` [PATCH v7 21/28] KVM: arm64: Add tracing capability " Vincent Donnefort
@ 2025-10-03 13:38 ` Vincent Donnefort
2025-10-03 13:38 ` [PATCH v7 23/28] KVM: arm64: Sync boot clock with " Vincent Donnefort
` (5 subsequent siblings)
27 siblings, 0 replies; 33+ messages in thread
From: Vincent Donnefort @ 2025-10-03 13:38 UTC (permalink / raw)
To: rostedt, mhiramat, mathieu.desnoyers, linux-trace-kernel, maz,
oliver.upton, joey.gouly, suzuki.poulose, yuzenghui
Cc: kvmarm, linux-arm-kernel, jstultz, qperret, will, aneesh.kumar,
kernel-team, linux-kernel, Vincent Donnefort
When running in KVM protected mode, the hypervisor is able to generate
events into tracefs-compatible ring-buffers. Create a trace remote so
the kernel can read those buffers.
This doesn't provide any event support yet; that will come in later
patches.
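For illustration, once the remote is registered, the hypervisor buffers
are expected to be exposed through the usual tracefs files under the
remote's directory (a sketch; the exact file set comes from the trace
remote core introduced earlier in this series):

  # ls /sys/kernel/tracing/remotes/hypervisor/
  buffer_size_kb  trace  trace_pipe  tracing_on
  # echo 1 > /sys/kernel/tracing/remotes/hypervisor/tracing_on
  # cat /sys/kernel/tracing/remotes/hypervisor/trace_pipe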
Signed-off-by: Vincent Donnefort <vdonnefort@google.com>
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index 06e948d066ac..e06358aaf1af 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -86,6 +86,7 @@ config PKVM_TRACING
bool
depends on KVM
depends on TRACING
+ select TRACE_REMOTE
select SIMPLE_RING_BUFFER
default y
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index 3ebc0570345c..2c184e3abd8e 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -30,6 +30,8 @@ kvm-$(CONFIG_HW_PERF_EVENTS) += pmu-emul.o pmu.o
kvm-$(CONFIG_ARM64_PTR_AUTH) += pauth.o
kvm-$(CONFIG_PTDUMP_STAGE2_DEBUGFS) += ptdump.o
+kvm-$(CONFIG_PKVM_TRACING) += hyp_trace.o
+
always-y := hyp_constants.h hyp-constants.s
define rule_gen_hyp_constants
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index bd6b6a620a09..6ae3c822b199 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -25,6 +25,7 @@
#define CREATE_TRACE_POINTS
#include "trace_arm.h"
+#include "hyp_trace.h"
#include <linux/uaccess.h>
#include <asm/ptrace.h>
@@ -2332,6 +2333,9 @@ static int __init init_subsystems(void)
kvm_register_perf_callbacks(NULL);
+ err = hyp_trace_init();
+ if (err)
+ kvm_err("Failed to initialize Hyp tracing\n");
out:
if (err)
hyp_cpu_pm_exit();
diff --git a/arch/arm64/kvm/hyp_trace.c b/arch/arm64/kvm/hyp_trace.c
new file mode 100644
index 000000000000..98051c3fb0c2
--- /dev/null
+++ b/arch/arm64/kvm/hyp_trace.c
@@ -0,0 +1,210 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2025 Google LLC
+ * Author: Vincent Donnefort <vdonnefort@google.com>
+ */
+
+#include <linux/trace_remote.h>
+#include <linux/simple_ring_buffer.h>
+
+#include <asm/kvm_host.h>
+#include <asm/kvm_hyptrace.h>
+
+#include "hyp_trace.h"
+
+/* Accesses to this struct from the trace_remote callbacks are protected by the trace_remote lock */
+static struct hyp_trace_buffer {
+ struct hyp_trace_desc *desc;
+ size_t desc_size;
+} trace_buffer;
+
+static int hyp_trace_buffer_alloc_bpages_backing(struct hyp_trace_buffer *trace_buffer, size_t size)
+{
+ int nr_bpages = (PAGE_ALIGN(size) / PAGE_SIZE) + 1;
+ size_t backing_size;
+ void *start;
+
+ backing_size = PAGE_ALIGN(sizeof(struct simple_buffer_page) * nr_bpages *
+ num_possible_cpus());
+
+ start = alloc_pages_exact(backing_size, GFP_KERNEL_ACCOUNT);
+ if (!start)
+ return -ENOMEM;
+
+ trace_buffer->desc->bpages_backing_start = (unsigned long)start;
+ trace_buffer->desc->bpages_backing_size = backing_size;
+
+ return 0;
+}
+
+static void hyp_trace_buffer_free_bpages_backing(struct hyp_trace_buffer *trace_buffer)
+{
+ free_pages_exact((void *)trace_buffer->desc->bpages_backing_start,
+ trace_buffer->desc->bpages_backing_size);
+}
+
+static int __load_page(unsigned long va)
+{
+ return kvm_call_hyp_nvhe(__pkvm_host_share_hyp, virt_to_pfn((void *)va), 1);
+}
+
+static void __unload_page(unsigned long va)
+{
+ WARN_ON(kvm_call_hyp_nvhe(__pkvm_host_unshare_hyp, virt_to_pfn((void *)va), 1));
+}
+
+static void hyp_trace_buffer_unload_pages(struct hyp_trace_buffer *trace_buffer, int last_cpu)
+{
+ struct ring_buffer_desc *rb_desc;
+ int cpu, p;
+
+ for_each_ring_buffer_desc(rb_desc, cpu, &trace_buffer->desc->trace_buffer_desc) {
+ if (cpu > last_cpu)
+ break;
+
+ __unload_page(rb_desc->meta_va);
+ for (p = 0; p < rb_desc->nr_page_va; p++)
+ __unload_page(rb_desc->page_va[p]);
+ }
+}
+
+static int hyp_trace_buffer_load_pages(struct hyp_trace_buffer *trace_buffer)
+{
+ struct ring_buffer_desc *rb_desc;
+ int cpu, p, ret = 0;
+
+ for_each_ring_buffer_desc(rb_desc, cpu, &trace_buffer->desc->trace_buffer_desc) {
+ ret = __load_page(rb_desc->meta_va);
+ if (ret)
+ break;
+
+ for (p = 0; p < rb_desc->nr_page_va; p++) {
+ ret = __load_page(rb_desc->page_va[p]);
+ if (ret)
+ break;
+ }
+ if (ret) {
+ for (p--; p >= 0; p--)
+ __unload_page(rb_desc->page_va[p]);
+ __unload_page(rb_desc->meta_va);
+ break;
+ }
+ }
+
+ if (ret)
+ hyp_trace_buffer_unload_pages(trace_buffer, cpu - 1);
+
+ return ret;
+}
+
+static struct trace_buffer_desc *hyp_trace_load(unsigned long size, void *priv)
+{
+ struct hyp_trace_buffer *trace_buffer = priv;
+ struct hyp_trace_desc *desc;
+ size_t desc_size;
+ int ret;
+
+ if (WARN_ON(trace_buffer->desc))
+ return ERR_PTR(-EINVAL);
+
+ desc_size = trace_buffer_desc_size(size, num_possible_cpus());
+ if (desc_size == SIZE_MAX)
+ return ERR_PTR(-E2BIG);
+
+ /*
+ * The hypervisor will unmap the descriptor from the host to protect it while it is
+ * read. Page granularity for the allocation ensures no other useful data is unmapped.
+ */
+ desc_size = PAGE_ALIGN(desc_size);
+ desc = (struct hyp_trace_desc *)alloc_pages_exact(desc_size, GFP_KERNEL);
+ if (!desc)
+ return ERR_PTR(-ENOMEM);
+
+ trace_buffer->desc = desc;
+ trace_buffer->desc_size = desc_size;
+ ret = hyp_trace_buffer_alloc_bpages_backing(trace_buffer, size);
+ if (ret)
+ goto err_free_desc;
+
+ ret = trace_remote_alloc_buffer(&desc->trace_buffer_desc, desc_size, size,
+ cpu_possible_mask);
+ if (ret)
+ goto err_free_backing;
+
+ ret = hyp_trace_buffer_load_pages(trace_buffer);
+ if (ret)
+ goto err_free_buffer;
+
+ ret = kvm_call_hyp_nvhe(__pkvm_load_tracing, (unsigned long)desc, desc_size);
+ if (ret)
+ goto err_unload_pages;
+
+ return &desc->trace_buffer_desc;
+
+err_unload_pages:
+ hyp_trace_buffer_unload_pages(trace_buffer, INT_MAX);
+
+err_free_buffer:
+ trace_remote_free_buffer(&desc->trace_buffer_desc);
+
+err_free_backing:
+ hyp_trace_buffer_free_bpages_backing(trace_buffer);
+
+err_free_desc:
+ free_pages_exact(desc, desc_size);
+ trace_buffer->desc = NULL;
+
+ return ERR_PTR(ret);
+}
+
+static void hyp_trace_unload(struct trace_buffer_desc *desc, void *priv)
+{
+ struct hyp_trace_buffer *trace_buffer = priv;
+
+ if (WARN_ON(desc != &trace_buffer->desc->trace_buffer_desc))
+ return;
+
+ kvm_call_hyp_nvhe(__pkvm_unload_tracing);
+ hyp_trace_buffer_unload_pages(trace_buffer, INT_MAX);
+ trace_remote_free_buffer(desc);
+ hyp_trace_buffer_free_bpages_backing(trace_buffer);
+ free_pages_exact(trace_buffer->desc, trace_buffer->desc_size);
+ trace_buffer->desc = NULL;
+}
+
+static int hyp_trace_enable_tracing(bool enable, void *priv)
+{
+ return kvm_call_hyp_nvhe(__pkvm_enable_tracing, enable);
+}
+
+static int hyp_trace_swap_reader_page(unsigned int cpu, void *priv)
+{
+ return kvm_call_hyp_nvhe(__pkvm_swap_reader_tracing, cpu);
+}
+
+static int hyp_trace_reset(unsigned int cpu, void *priv)
+{
+ return 0;
+}
+
+static int hyp_trace_enable_event(unsigned short id, bool enable, void *priv)
+{
+ return 0;
+}
+
+static struct trace_remote_callbacks trace_remote_callbacks = {
+ .load_trace_buffer = hyp_trace_load,
+ .unload_trace_buffer = hyp_trace_unload,
+ .enable_tracing = hyp_trace_enable_tracing,
+ .swap_reader_page = hyp_trace_swap_reader_page,
+ .reset = hyp_trace_reset,
+ .enable_event = hyp_trace_enable_event,
+};
+
+int hyp_trace_init(void)
+{
+ if (!is_protected_kvm_enabled())
+ return 0;
+
+ return trace_remote_register("hypervisor", &trace_remote_callbacks, &trace_buffer, NULL, 0);
+}
diff --git a/arch/arm64/kvm/hyp_trace.h b/arch/arm64/kvm/hyp_trace.h
new file mode 100644
index 000000000000..54d8b1f44ca5
--- /dev/null
+++ b/arch/arm64/kvm/hyp_trace.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ARM64_KVM_HYP_TRACE_H__
+#define __ARM64_KVM_HYP_TRACE_H__
+
+#ifdef CONFIG_PKVM_TRACING
+int hyp_trace_init(void);
+#else
+static inline int hyp_trace_init(void) { return 0; }
+#endif
+#endif
--
2.51.0.618.g983fd99d29-goog
^ permalink raw reply related [flat|nested] 33+ messages in thread* [PATCH v7 23/28] KVM: arm64: Sync boot clock with the pKVM hyp
2025-10-03 13:37 [PATCH v7 00/28] Tracefs support for pKVM Vincent Donnefort
` (21 preceding siblings ...)
2025-10-03 13:38 ` [PATCH v7 22/28] KVM: arm64: Add trace remote " Vincent Donnefort
@ 2025-10-03 13:38 ` Vincent Donnefort
2025-10-03 13:38 ` [PATCH v7 24/28] KVM: arm64: Add trace reset to " Vincent Donnefort
` (4 subsequent siblings)
27 siblings, 0 replies; 33+ messages in thread
From: Vincent Donnefort @ 2025-10-03 13:38 UTC (permalink / raw)
To: rostedt, mhiramat, mathieu.desnoyers, linux-trace-kernel, maz,
oliver.upton, joey.gouly, suzuki.poulose, yuzenghui
Cc: kvmarm, linux-arm-kernel, jstultz, qperret, will, aneesh.kumar,
kernel-team, linux-kernel, Vincent Donnefort, Thomas Gleixner,
Stephen Boyd, Christopher S. Hall, Richard Cochran
Configure the pKVM hypervisor tracing clock from the kernel boot clock.
For tracing purposes, the boot clock is interesting as it doesn't stop
on suspend. However, it is corrected on a regular basis, which implies
we need to re-evaluate it every once in a while.
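As a reminder, the hypervisor clock derives nanoseconds from the cycle
counter with a fixed-point mult/shift pair, which is what has to be
re-evaluated. A minimal sketch of that conversion (names assumed; the
actual code below also guards the multiplication against 64-bit
overflow):

  static inline u64 hyp_clock_ns(u64 cyc, u64 epoch_cyc, u64 epoch_ns,
                                 u32 mult, u32 shift)
  {
          /* Nanoseconds elapsed since the epoch, in fixed-point math */
          return epoch_ns + (((cyc - epoch_cyc) * mult) >> shift);
  }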
Cc: John Stultz <jstultz@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Stephen Boyd <sboyd@kernel.org>
Cc: Christopher S. Hall <christopher.s.hall@intel.com>
Cc: Richard Cochran <richardcochran@gmail.com>
Signed-off-by: Vincent Donnefort <vdonnefort@google.com>
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 437ac948d136..d122d79718a0 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -87,6 +87,7 @@ enum __kvm_host_smccc_func {
__KVM_HOST_SMCCC_FUNC___pkvm_vcpu_load,
__KVM_HOST_SMCCC_FUNC___pkvm_vcpu_put,
__KVM_HOST_SMCCC_FUNC___pkvm_tlb_flush_vmid,
+ __KVM_HOST_SMCCC_FUNC___pkvm_update_clock_tracing,
__KVM_HOST_SMCCC_FUNC___pkvm_load_tracing,
__KVM_HOST_SMCCC_FUNC___pkvm_unload_tracing,
__KVM_HOST_SMCCC_FUNC___pkvm_enable_tracing,
diff --git a/arch/arm64/kvm/hyp/include/nvhe/trace.h b/arch/arm64/kvm/hyp/include/nvhe/trace.h
index 996e90c0974f..4e11dcdf049b 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/trace.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/trace.h
@@ -7,6 +7,7 @@
void *tracing_reserve_entry(unsigned long length);
void tracing_commit_entry(void);
+void __pkvm_update_clock_tracing(u32 mult, u32 shift, u64 epoch_ns, u64 epoch_cyc);
int __pkvm_load_tracing(unsigned long desc_va, size_t desc_size);
void __pkvm_unload_tracing(void);
int __pkvm_enable_tracing(bool enable);
@@ -15,6 +16,8 @@ int __pkvm_swap_reader_tracing(unsigned int cpu);
static inline void *tracing_reserve_entry(unsigned long length) { return NULL; }
static inline void tracing_commit_entry(void) { }
+static inline
+void __pkvm_update_clock_tracing(u32 mult, u32 shift, u64 epoch_ns, u64 epoch_cyc) { }
static inline int __pkvm_load_tracing(unsigned long desc_va, size_t desc_size) { return -ENODEV; }
static inline void __pkvm_unload_tracing(void) { }
static inline int __pkvm_enable_tracing(bool enable) { return -ENODEV; }
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index 02b2fdd9a8e4..36a263422e4e 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -574,6 +574,18 @@ static void handle___pkvm_teardown_vm(struct kvm_cpu_context *host_ctxt)
cpu_reg(host_ctxt, 1) = __pkvm_teardown_vm(handle);
}
+static void handle___pkvm_update_clock_tracing(struct kvm_cpu_context *host_ctxt)
+{
+ DECLARE_REG(u32, mult, host_ctxt, 1);
+ DECLARE_REG(u32, shift, host_ctxt, 2);
+ DECLARE_REG(u64, epoch_ns, host_ctxt, 3);
+ DECLARE_REG(u64, epoch_cyc, host_ctxt, 4);
+
+ __pkvm_update_clock_tracing(mult, shift, epoch_ns, epoch_cyc);
+
+ cpu_reg(host_ctxt, 1) = 0;
+}
+
static void handle___pkvm_load_tracing(struct kvm_cpu_context *host_ctxt)
{
DECLARE_REG(unsigned long, desc_hva, host_ctxt, 1);
@@ -642,6 +654,7 @@ static const hcall_t host_hcall[] = {
HANDLE_FUNC(__pkvm_vcpu_load),
HANDLE_FUNC(__pkvm_vcpu_put),
HANDLE_FUNC(__pkvm_tlb_flush_vmid),
+ HANDLE_FUNC(__pkvm_update_clock_tracing),
HANDLE_FUNC(__pkvm_load_tracing),
HANDLE_FUNC(__pkvm_unload_tracing),
HANDLE_FUNC(__pkvm_enable_tracing),
diff --git a/arch/arm64/kvm/hyp/nvhe/trace.c b/arch/arm64/kvm/hyp/nvhe/trace.c
index def5cbc75722..d146ac3046de 100644
--- a/arch/arm64/kvm/hyp/nvhe/trace.c
+++ b/arch/arm64/kvm/hyp/nvhe/trace.c
@@ -255,3 +255,19 @@ int __pkvm_swap_reader_tracing(unsigned int cpu)
return ret;
}
+
+void __pkvm_update_clock_tracing(u32 mult, u32 shift, u64 epoch_ns, u64 epoch_cyc)
+{
+ int cpu;
+
+ /* After this loop, all CPUs are observing the new bank... */
+ for (cpu = 0; cpu < hyp_nr_cpus; cpu++) {
+ struct simple_rb_per_cpu *simple_rb = per_cpu_ptr(trace_buffer.simple_rbs, cpu);
+
+ while (READ_ONCE(simple_rb->status) == SIMPLE_RB_WRITING)
+ ;
+ }
+
+ /* ...we can now override the old one and swap. */
+ trace_clock_update(mult, shift, epoch_ns, epoch_cyc);
+}
diff --git a/arch/arm64/kvm/hyp_trace.c b/arch/arm64/kvm/hyp_trace.c
index 98051c3fb0c2..4f154ec743f3 100644
--- a/arch/arm64/kvm/hyp_trace.c
+++ b/arch/arm64/kvm/hyp_trace.c
@@ -5,6 +5,7 @@
*/
#include <linux/trace_remote.h>
+#include <linux/tracefs.h>
#include <linux/simple_ring_buffer.h>
#include <asm/kvm_host.h>
@@ -12,6 +13,121 @@
#include "hyp_trace.h"
+/* Same 10min max conversion range used by clocksource when the counter width exceeds 32 bits */
+#define CLOCK_MAX_CONVERSION_S 600
+/*
+ * Time to give for the clock init. Long enough to get a good mult/shift
+ * estimation. Short enough to not delay the tracing start too much.
+ */
+#define CLOCK_INIT_MS 100
+/*
+ * Time between clock checks. Must be small enough to catch clock deviation when
+ * it is still tiny.
+ */
+#define CLOCK_UPDATE_MS 500
+
+static struct hyp_trace_clock {
+ u64 cycles;
+ u64 cyc_overflow64;
+ u64 boot;
+ u32 mult;
+ u32 shift;
+ struct delayed_work work;
+ struct completion ready;
+ struct mutex lock;
+ bool running;
+} hyp_clock;
+
+static void __hyp_clock_work(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct hyp_trace_clock *hyp_clock;
+ struct system_time_snapshot snap;
+ u64 rate, delta_cycles;
+ u64 boot, delta_boot;
+
+ hyp_clock = container_of(dwork, struct hyp_trace_clock, work);
+
+ ktime_get_snapshot(&snap);
+ boot = ktime_to_ns(snap.boot);
+
+ delta_boot = boot - hyp_clock->boot;
+ delta_cycles = snap.cycles - hyp_clock->cycles;
+
+ /* Compare hyp clock with the kernel boot clock */
+ if (hyp_clock->mult) {
+ u64 err, cur = delta_cycles;
+
+ if (WARN_ON_ONCE(cur >= hyp_clock->cyc_overflow64)) {
+ __uint128_t tmp = (__uint128_t)cur * hyp_clock->mult;
+
+ cur = tmp >> hyp_clock->shift;
+ } else {
+ cur *= hyp_clock->mult;
+ cur >>= hyp_clock->shift;
+ }
+ cur += hyp_clock->boot;
+
+ err = abs_diff(cur, boot);
+ /* No deviation, only update epoch if necessary */
+ if (!err) {
+ if (delta_cycles >= (hyp_clock->cyc_overflow64 >> 1))
+ goto fast_forward;
+
+ goto resched;
+ }
+
+ /* Warn if the error is above tracing precision (1us) */
+ if (err > NSEC_PER_USEC)
+ pr_warn_ratelimited("hyp trace clock off by %lluus\n",
+ err / NSEC_PER_USEC);
+ }
+
+ rate = div64_u64(delta_cycles * NSEC_PER_SEC, delta_boot);
+
+ clocks_calc_mult_shift(&hyp_clock->mult, &hyp_clock->shift,
+ rate, NSEC_PER_SEC, CLOCK_MAX_CONVERSION_S);
+
+ /* Add a comfortable 50% margin */
+ hyp_clock->cyc_overflow64 = (U64_MAX / hyp_clock->mult) >> 1;
+
+fast_forward:
+ hyp_clock->cycles = snap.cycles;
+ hyp_clock->boot = boot;
+ kvm_call_hyp_nvhe(__pkvm_update_clock_tracing, hyp_clock->mult,
+ hyp_clock->shift, hyp_clock->boot, hyp_clock->cycles);
+ complete(&hyp_clock->ready);
+
+resched:
+ schedule_delayed_work(&hyp_clock->work,
+ msecs_to_jiffies(CLOCK_UPDATE_MS));
+}
+
+static void hyp_trace_clock_enable(struct hyp_trace_clock *hyp_clock, bool enable)
+{
+ struct system_time_snapshot snap;
+
+ if (hyp_clock->running == enable)
+ return;
+ if (!enable) {
+ cancel_delayed_work_sync(&hyp_clock->work);
+ hyp_clock->running = false;
+ return;
+ }
+
+ ktime_get_snapshot(&snap);
+
+ hyp_clock->boot = ktime_to_ns(snap.boot);
+ hyp_clock->cycles = snap.cycles;
+ hyp_clock->mult = 0;
+
+ init_completion(&hyp_clock->ready);
+ INIT_DELAYED_WORK(&hyp_clock->work, __hyp_clock_work);
+ schedule_delayed_work(&hyp_clock->work, msecs_to_jiffies(CLOCK_INIT_MS));
+ wait_for_completion(&hyp_clock->ready);
+ hyp_clock->running = true;
+}
+
/* Access to this struct within the trace_remote_callbacks are protected by the trace_remote lock */
static struct hyp_trace_buffer {
struct hyp_trace_desc *desc;
@@ -174,6 +290,8 @@ static void hyp_trace_unload(struct trace_buffer_desc *desc, void *priv)
static int hyp_trace_enable_tracing(bool enable, void *priv)
{
+ hyp_trace_clock_enable(&hyp_clock, enable);
+
return kvm_call_hyp_nvhe(__pkvm_enable_tracing, enable);
}
@@ -192,7 +310,22 @@ static int hyp_trace_enable_event(unsigned short id, bool enable, void *priv)
return 0;
}
+static int hyp_trace_clock_show(struct seq_file *m, void *v)
+{
+ seq_puts(m, "[boot]\n");
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(hyp_trace_clock);
+
+static int hyp_trace_init_tracefs(struct dentry *d, void *priv)
+{
+ return tracefs_create_file("trace_clock", 0440, d, NULL, &hyp_trace_clock_fops) ?
+ 0 : -ENOMEM;
+}
+
static struct trace_remote_callbacks trace_remote_callbacks = {
+ .init = hyp_trace_init_tracefs,
.load_trace_buffer = hyp_trace_load,
.unload_trace_buffer = hyp_trace_unload,
.enable_tracing = hyp_trace_enable_tracing,
--
2.51.0.618.g983fd99d29-goog
^ permalink raw reply related [flat|nested] 33+ messages in thread* [PATCH v7 24/28] KVM: arm64: Add trace reset to the pKVM hyp
2025-10-03 13:37 [PATCH v7 00/28] Tracefs support for pKVM Vincent Donnefort
` (22 preceding siblings ...)
2025-10-03 13:38 ` [PATCH v7 23/28] KVM: arm64: Sync boot clock with " Vincent Donnefort
@ 2025-10-03 13:38 ` Vincent Donnefort
2025-10-03 13:38 ` [PATCH v7 25/28] KVM: arm64: Add event support to the pKVM hyp and trace remote Vincent Donnefort
` (3 subsequent siblings)
27 siblings, 0 replies; 33+ messages in thread
From: Vincent Donnefort @ 2025-10-03 13:38 UTC (permalink / raw)
To: rostedt, mhiramat, mathieu.desnoyers, linux-trace-kernel, maz,
oliver.upton, joey.gouly, suzuki.poulose, yuzenghui
Cc: kvmarm, linux-arm-kernel, jstultz, qperret, will, aneesh.kumar,
kernel-team, linux-kernel, Vincent Donnefort
Let the hypervisor reset its trace buffer when a reset is triggered from
the tracefs file remotes/hypervisor/trace.
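For example, the reset is expected to be driven the same way as ftrace's
trace file, where truncation or any write clears the buffer (path from
the earlier patches, behaviour assumed to match ftrace):

  # echo > /sys/kernel/tracing/remotes/hypervisor/trace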
Signed-off-by: Vincent Donnefort <vdonnefort@google.com>
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index d122d79718a0..c40820a4b049 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -91,6 +91,7 @@ enum __kvm_host_smccc_func {
__KVM_HOST_SMCCC_FUNC___pkvm_load_tracing,
__KVM_HOST_SMCCC_FUNC___pkvm_unload_tracing,
__KVM_HOST_SMCCC_FUNC___pkvm_enable_tracing,
+ __KVM_HOST_SMCCC_FUNC___pkvm_reset_tracing,
__KVM_HOST_SMCCC_FUNC___pkvm_swap_reader_tracing,
};
diff --git a/arch/arm64/kvm/hyp/include/nvhe/trace.h b/arch/arm64/kvm/hyp/include/nvhe/trace.h
index 4e11dcdf049b..0d2732f0d406 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/trace.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/trace.h
@@ -11,6 +11,7 @@ void __pkvm_update_clock_tracing(u32 mult, u32 shift, u64 epoch_ns, u64 epoch_cy
int __pkvm_load_tracing(unsigned long desc_va, size_t desc_size);
void __pkvm_unload_tracing(void);
int __pkvm_enable_tracing(bool enable);
+int __pkvm_reset_tracing(unsigned int cpu);
int __pkvm_swap_reader_tracing(unsigned int cpu);
#else
static inline void *tracing_reserve_entry(unsigned long length) { return NULL; }
@@ -21,6 +22,7 @@ void __pkvm_update_clock_tracing(u32 mult, u32 shift, u64 epoch_ns, u64 epoch_cy
static inline int __pkvm_load_tracing(unsigned long desc_va, size_t desc_size) { return -ENODEV; }
static inline void __pkvm_unload_tracing(void) { }
static inline int __pkvm_enable_tracing(bool enable) { return -ENODEV; }
+static inline int __pkvm_reset_tracing(unsigned int cpu) { return -ENODEV; }
static inline int __pkvm_swap_reader_tracing(unsigned int cpu) { return -ENODEV; }
#endif
#endif
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index 36a263422e4e..a7c5e9fc27a0 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -608,6 +608,13 @@ static void handle___pkvm_enable_tracing(struct kvm_cpu_context *host_ctxt)
cpu_reg(host_ctxt, 1) = __pkvm_enable_tracing(enable);
}
+static void handle___pkvm_reset_tracing(struct kvm_cpu_context *host_ctxt)
+{
+ DECLARE_REG(unsigned int, cpu, host_ctxt, 1);
+
+ cpu_reg(host_ctxt, 1) = __pkvm_reset_tracing(cpu);
+}
+
static void handle___pkvm_swap_reader_tracing(struct kvm_cpu_context *host_ctxt)
{
DECLARE_REG(unsigned int, cpu, host_ctxt, 1);
@@ -658,6 +665,7 @@ static const hcall_t host_hcall[] = {
HANDLE_FUNC(__pkvm_load_tracing),
HANDLE_FUNC(__pkvm_unload_tracing),
HANDLE_FUNC(__pkvm_enable_tracing),
+ HANDLE_FUNC(__pkvm_reset_tracing),
HANDLE_FUNC(__pkvm_swap_reader_tracing),
};
diff --git a/arch/arm64/kvm/hyp/nvhe/trace.c b/arch/arm64/kvm/hyp/nvhe/trace.c
index d146ac3046de..1fd9b32950fe 100644
--- a/arch/arm64/kvm/hyp/nvhe/trace.c
+++ b/arch/arm64/kvm/hyp/nvhe/trace.c
@@ -236,6 +236,25 @@ int __pkvm_enable_tracing(bool enable)
return ret;
}
+int __pkvm_reset_tracing(unsigned int cpu)
+{
+ int ret = 0;
+
+ if (cpu >= hyp_nr_cpus)
+ return -EINVAL;
+
+ hyp_spin_lock(&trace_buffer.lock);
+
+ if (hyp_trace_buffer_loaded(&trace_buffer))
+ ret = simple_ring_buffer_reset(per_cpu_ptr(trace_buffer.simple_rbs, cpu));
+ else
+ ret = -ENODEV;
+
+ hyp_spin_unlock(&trace_buffer.lock);
+
+ return ret;
+}
+
int __pkvm_swap_reader_tracing(unsigned int cpu)
{
int ret;
diff --git a/arch/arm64/kvm/hyp_trace.c b/arch/arm64/kvm/hyp_trace.c
index 4f154ec743f3..1062b4310f8c 100644
--- a/arch/arm64/kvm/hyp_trace.c
+++ b/arch/arm64/kvm/hyp_trace.c
@@ -302,7 +302,7 @@ static int hyp_trace_swap_reader_page(unsigned int cpu, void *priv)
static int hyp_trace_reset(unsigned int cpu, void *priv)
{
- return 0;
+ return kvm_call_hyp_nvhe(__pkvm_reset_tracing, cpu);
}
static int hyp_trace_enable_event(unsigned short id, bool enable, void *priv)
--
2.51.0.618.g983fd99d29-goog
^ permalink raw reply related [flat|nested] 33+ messages in thread* [PATCH v7 25/28] KVM: arm64: Add event support to the pKVM hyp and trace remote
2025-10-03 13:37 [PATCH v7 00/28] Tracefs support for pKVM Vincent Donnefort
` (23 preceding siblings ...)
2025-10-03 13:38 ` [PATCH v7 24/28] KVM: arm64: Add trace reset to " Vincent Donnefort
@ 2025-10-03 13:38 ` Vincent Donnefort
2025-10-03 13:38 ` [PATCH v7 26/28] KVM: arm64: Add hyp_enter/hyp_exit events to pKVM hyp Vincent Donnefort
` (2 subsequent siblings)
27 siblings, 0 replies; 33+ messages in thread
From: Vincent Donnefort @ 2025-10-03 13:38 UTC (permalink / raw)
To: rostedt, mhiramat, mathieu.desnoyers, linux-trace-kernel, maz,
oliver.upton, joey.gouly, suzuki.poulose, yuzenghui
Cc: kvmarm, linux-arm-kernel, jstultz, qperret, will, aneesh.kumar,
kernel-team, linux-kernel, Vincent Donnefort
Allow the creation of hypervisor and trace remote events with a single
macro, HYP_EVENT(). On the kernel side, that macro expands to all the
required declarations (based on REMOTE_EVENT()); on the hypervisor side,
it expands to the trace_<event>() function.
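For illustration, a hypothetical event carrying a single field would be
declared as follows (the event and field names are made up; the HE_*
helpers are the ones introduced below):

  HYP_EVENT(foo,
          HE_PROTO(u64 val),
          HE_STRUCT(
                  he_field(u64, val)
          ),
          HE_ASSIGN(
                  __entry->val = val;
          ),
          HE_PRINTK("val=%llu", __entry->val)
  );

The hypervisor can then call trace_foo(val), while the kernel side gets
the matching remote event declaration and format file.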
Signed-off-by: Vincent Donnefort <vdonnefort@google.com>
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index c40820a4b049..79019e11f529 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -93,6 +93,7 @@ enum __kvm_host_smccc_func {
__KVM_HOST_SMCCC_FUNC___pkvm_enable_tracing,
__KVM_HOST_SMCCC_FUNC___pkvm_reset_tracing,
__KVM_HOST_SMCCC_FUNC___pkvm_swap_reader_tracing,
+ __KVM_HOST_SMCCC_FUNC___pkvm_enable_event,
};
#define DECLARE_KVM_VHE_SYM(sym) extern char sym[]
diff --git a/arch/arm64/include/asm/kvm_define_hypevents.h b/arch/arm64/include/asm/kvm_define_hypevents.h
new file mode 100644
index 000000000000..0ef5a9eefcbe
--- /dev/null
+++ b/arch/arm64/include/asm/kvm_define_hypevents.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef HYP_EVENT_FILE
+# undef __ARM64_KVM_HYPEVENTS_H_
+# define REMOTE_EVENT_INCLUDE_FILE arch/arm64/include/asm/kvm_hypevents.h
+#else
+# define REMOTE_EVENT_INCLUDE_FILE HYP_EVENT_FILE
+#endif
+
+#define REMOTE_EVENT_SECTION "_hyp_events"
+
+#define HE_STRUCT(__args) __args
+#define HE_PRINTK(__args...) __args
+#define he_field re_field
+
+#define HYP_EVENT(__name, __proto, __struct, __assign, __printk) \
+ REMOTE_EVENT(__name, 0, RE_STRUCT(__struct), RE_PRINTK(__printk))
+
+#define HYP_EVENT_MULTI_READ
+
+#include <trace/define_remote_events.h>
diff --git a/arch/arm64/include/asm/kvm_hypevents.h b/arch/arm64/include/asm/kvm_hypevents.h
new file mode 100644
index 000000000000..d6e033c96c52
--- /dev/null
+++ b/arch/arm64/include/asm/kvm_hypevents.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#if !defined(__ARM64_KVM_HYPEVENTS_H_) || defined(HYP_EVENT_MULTI_READ)
+#define __ARM64_KVM_HYPEVENTS_H_
+
+#ifdef __KVM_NVHE_HYPERVISOR__
+#include <nvhe/trace.h>
+#endif
+
+#endif
diff --git a/arch/arm64/include/asm/kvm_hyptrace.h b/arch/arm64/include/asm/kvm_hyptrace.h
index 9c30a479bc36..d6e0953a07d6 100644
--- a/arch/arm64/include/asm/kvm_hyptrace.h
+++ b/arch/arm64/include/asm/kvm_hyptrace.h
@@ -10,4 +10,17 @@ struct hyp_trace_desc {
struct trace_buffer_desc trace_buffer_desc;
};
+
+struct hyp_event_id {
+ unsigned short id;
+ void *data;
+};
+
+extern struct remote_event __hyp_events_start[];
+extern struct remote_event __hyp_events_end[];
+
+/* hyp_event section used by the hypervisor */
+extern struct hyp_event_id __hyp_event_ids_start[];
+extern struct hyp_event_id __hyp_event_ids_end[];
+
#endif
diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
index 714b0b5ec5ac..c1dbf0d317db 100644
--- a/arch/arm64/kernel/image-vars.h
+++ b/arch/arm64/kernel/image-vars.h
@@ -134,6 +134,10 @@ KVM_NVHE_ALIAS(__hyp_data_start);
KVM_NVHE_ALIAS(__hyp_data_end);
KVM_NVHE_ALIAS(__hyp_rodata_start);
KVM_NVHE_ALIAS(__hyp_rodata_end);
+#ifdef CONFIG_PKVM_TRACING
+KVM_NVHE_ALIAS(__hyp_event_ids_start);
+KVM_NVHE_ALIAS(__hyp_event_ids_end);
+#endif
/* pKVM static key */
KVM_NVHE_ALIAS(kvm_protected_mode_initialized);
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index ad6133b89e7a..0e201a3c8de5 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -13,12 +13,23 @@
*(__kvm_ex_table) \
__stop___kvm_ex_table = .;
+#ifdef CONFIG_PKVM_TRACING
+#define HYPERVISOR_EVENT_IDS \
+ . = ALIGN(PAGE_SIZE); \
+ __hyp_event_ids_start = .; \
+ *(HYP_SECTION_NAME(.event_ids)) \
+ __hyp_event_ids_end = .;
+#else
+#define HYPERVISOR_EVENT_IDS
+#endif
+
#define HYPERVISOR_RODATA_SECTIONS \
HYP_SECTION_NAME(.rodata) : { \
. = ALIGN(PAGE_SIZE); \
__hyp_rodata_start = .; \
*(HYP_SECTION_NAME(.data..ro_after_init)) \
*(HYP_SECTION_NAME(.rodata)) \
+ HYPERVISOR_EVENT_IDS \
. = ALIGN(PAGE_SIZE); \
__hyp_rodata_end = .; \
}
@@ -307,6 +318,13 @@ SECTIONS
HYPERVISOR_DATA_SECTION
+#ifdef CONFIG_PKVM_TRACING
+ .data.hyp_events : {
+ __hyp_events_start = .;
+ *(SORT(_hyp_events.*))
+ __hyp_events_end = .;
+ }
+#endif
/*
* Data written with the MMU off but read with the MMU on requires
* cache lines to be invalidated, discarding up to a Cache Writeback
diff --git a/arch/arm64/kvm/hyp/include/nvhe/define_events.h b/arch/arm64/kvm/hyp/include/nvhe/define_events.h
new file mode 100644
index 000000000000..2298b49cb355
--- /dev/null
+++ b/arch/arm64/kvm/hyp/include/nvhe/define_events.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef HYP_EVENT_FILE
+# define __HYP_EVENT_FILE <asm/kvm_hypevents.h>
+#else
+# define __HYP_EVENT_FILE __stringify(HYP_EVENT_FILE)
+#endif
+
+#undef HYP_EVENT
+#define HYP_EVENT(__name, __proto, __struct, __assign, __printk) \
+ atomic_t __ro_after_init __name##_enabled = ATOMIC_INIT(0); \
+ struct hyp_event_id hyp_event_id_##__name \
+ __section(".hyp.event_ids."#__name) = { \
+ .data = (void *)&__name##_enabled, \
+ }
+
+#define HYP_EVENT_MULTI_READ
+#include __HYP_EVENT_FILE
+#undef HYP_EVENT_MULTI_READ
+
+#undef HYP_EVENT
diff --git a/arch/arm64/kvm/hyp/include/nvhe/trace.h b/arch/arm64/kvm/hyp/include/nvhe/trace.h
index 0d2732f0d406..f7b286e92853 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/trace.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/trace.h
@@ -1,21 +1,52 @@
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __ARM64_KVM_HYP_NVHE_TRACE_H
#define __ARM64_KVM_HYP_NVHE_TRACE_H
+
+#include <linux/trace_remote_event.h>
+
#include <asm/kvm_hyptrace.h>
+#define HE_PROTO(__args...) __args
+
#ifdef CONFIG_PKVM_TRACING
void *tracing_reserve_entry(unsigned long length);
void tracing_commit_entry(void);
+#define HE_ASSIGN(__args...) __args
+#define HE_STRUCT RE_STRUCT
+#define he_field re_field
+
+#define HYP_EVENT(__name, __proto, __struct, __assign, __printk) \
+ REMOTE_EVENT_FORMAT(__name, __struct); \
+ extern atomic_t __name##_enabled; \
+ extern struct hyp_event_id hyp_event_id_##__name; \
+ static __always_inline void trace_##__name(__proto) \
+ { \
+ struct remote_event_format_##__name *__entry; \
+ size_t length = sizeof(*__entry); \
+ \
+ if (!atomic_read(&__name##_enabled)) \
+ return; \
+ __entry = tracing_reserve_entry(length); \
+ if (!__entry) \
+ return; \
+ __entry->hdr.id = hyp_event_id_##__name.id; \
+ __assign \
+ tracing_commit_entry(); \
+ }
+
void __pkvm_update_clock_tracing(u32 mult, u32 shift, u64 epoch_ns, u64 epoch_cyc);
int __pkvm_load_tracing(unsigned long desc_va, size_t desc_size);
void __pkvm_unload_tracing(void);
int __pkvm_enable_tracing(bool enable);
int __pkvm_reset_tracing(unsigned int cpu);
int __pkvm_swap_reader_tracing(unsigned int cpu);
+int __pkvm_enable_event(unsigned short id, bool enable);
#else
static inline void *tracing_reserve_entry(unsigned long length) { return NULL; }
static inline void tracing_commit_entry(void) { }
+#define HYP_EVENT(__name, __proto, __struct, __assign, __printk) \
+ static inline void trace_##__name(__proto) {}
static inline
void __pkvm_update_clock_tracing(u32 mult, u32 shift, u64 epoch_ns, u64 epoch_cyc) { }
@@ -24,5 +55,6 @@ static inline void __pkvm_unload_tracing(void) { }
static inline int __pkvm_enable_tracing(bool enable) { return -ENODEV; }
static inline int __pkvm_reset_tracing(unsigned int cpu) { return -ENODEV; }
static inline int __pkvm_swap_reader_tracing(unsigned int cpu) { return -ENODEV; }
+static inline int __pkvm_enable_event(unsigned short id, bool enable) { return -ENODEV; }
#endif
#endif
diff --git a/arch/arm64/kvm/hyp/nvhe/Makefile b/arch/arm64/kvm/hyp/nvhe/Makefile
index e640f12808f7..09bb8dfa7ca2 100644
--- a/arch/arm64/kvm/hyp/nvhe/Makefile
+++ b/arch/arm64/kvm/hyp/nvhe/Makefile
@@ -28,7 +28,7 @@ hyp-obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o hyp-init.o host.o
hyp-obj-y += ../vgic-v3-sr.o ../aarch32.o ../vgic-v2-cpuif-proxy.o ../entry.o \
../fpsimd.o ../hyp-entry.o ../exception.o ../pgtable.o
hyp-obj-$(CONFIG_LIST_HARDENED) += list_debug.o
-hyp-obj-$(CONFIG_PKVM_TRACING) += clock.o trace.o ../../../../../kernel/trace/simple_ring_buffer.o
+hyp-obj-$(CONFIG_PKVM_TRACING) += clock.o trace.o ../../../../../kernel/trace/simple_ring_buffer.o events.o
hyp-obj-y += $(lib-objs)
##
diff --git a/arch/arm64/kvm/hyp/nvhe/events.c b/arch/arm64/kvm/hyp/nvhe/events.c
new file mode 100644
index 000000000000..5905b42cb0d0
--- /dev/null
+++ b/arch/arm64/kvm/hyp/nvhe/events.c
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2025 Google LLC
+ * Author: Vincent Donnefort <vdonnefort@google.com>
+ */
+
+#include <nvhe/mm.h>
+#include <nvhe/trace.h>
+
+#include <nvhe/define_events.h>
+
+extern struct hyp_event_id __hyp_event_ids_start[];
+extern struct hyp_event_id __hyp_event_ids_end[];
+
+int __pkvm_enable_event(unsigned short id, bool enable)
+{
+ struct hyp_event_id *event_id = __hyp_event_ids_start;
+ atomic_t *enable_key;
+
+ for (; (unsigned long)event_id < (unsigned long)__hyp_event_ids_end;
+ event_id++) {
+ if (event_id->id != id)
+ continue;
+
+ enable_key = (atomic_t *)event_id->data;
+ enable_key = hyp_fixmap_map(__hyp_pa(enable_key));
+
+ atomic_set(enable_key, enable);
+
+ hyp_fixmap_unmap();
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index a7c5e9fc27a0..aebed41f7de7 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -622,6 +622,14 @@ static void handle___pkvm_swap_reader_tracing(struct kvm_cpu_context *host_ctxt)
cpu_reg(host_ctxt, 1) = __pkvm_swap_reader_tracing(cpu);
}
+static void handle___pkvm_enable_event(struct kvm_cpu_context *host_ctxt)
+{
+ DECLARE_REG(unsigned short, id, host_ctxt, 1);
+ DECLARE_REG(bool, enable, host_ctxt, 2);
+
+ cpu_reg(host_ctxt, 1) = __pkvm_enable_event(id, enable);
+}
+
typedef void (*hcall_t)(struct kvm_cpu_context *);
#define HANDLE_FUNC(x) [__KVM_HOST_SMCCC_FUNC_##x] = (hcall_t)handle_##x
@@ -667,6 +675,7 @@ static const hcall_t host_hcall[] = {
HANDLE_FUNC(__pkvm_enable_tracing),
HANDLE_FUNC(__pkvm_reset_tracing),
HANDLE_FUNC(__pkvm_swap_reader_tracing),
+ HANDLE_FUNC(__pkvm_enable_event),
};
static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp.lds.S b/arch/arm64/kvm/hyp/nvhe/hyp.lds.S
index d724f6d69302..a68411bf4bef 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp.lds.S
+++ b/arch/arm64/kvm/hyp/nvhe/hyp.lds.S
@@ -16,6 +16,12 @@ SECTIONS {
HYP_SECTION(.text)
HYP_SECTION(.data..ro_after_init)
HYP_SECTION(.rodata)
+#ifdef CONFIG_PKVM_TRACING
+ . = ALIGN(PAGE_SIZE);
+ BEGIN_HYP_SECTION(.event_ids)
+ *(SORT(.hyp.event_ids.*))
+ END_HYP_SECTION
+#endif
/*
* .hyp..data..percpu needs to be page aligned to maintain the same
diff --git a/arch/arm64/kvm/hyp_trace.c b/arch/arm64/kvm/hyp_trace.c
index 1062b4310f8c..73539f5b5e42 100644
--- a/arch/arm64/kvm/hyp_trace.c
+++ b/arch/arm64/kvm/hyp_trace.c
@@ -307,7 +307,7 @@ static int hyp_trace_reset(unsigned int cpu, void *priv)
static int hyp_trace_enable_event(unsigned short id, bool enable, void *priv)
{
- return 0;
+ return kvm_call_hyp_nvhe(__pkvm_enable_event, id, enable);
}
static int hyp_trace_clock_show(struct seq_file *m, void *v)
@@ -334,10 +334,27 @@ static struct trace_remote_callbacks trace_remote_callbacks = {
.enable_event = hyp_trace_enable_event,
};
+#include <asm/kvm_define_hypevents.h>
+
+static void hyp_trace_init_events(void)
+{
+ struct hyp_event_id *hyp_event_id = __hyp_event_ids_start;
+ struct remote_event *event = __hyp_events_start;
+ int id = 0;
+
+ /* Events on both the kernel and hypervisor sides are sorted the same way */
+ for (; (unsigned long)event < (unsigned long)__hyp_events_end;
+ event++, hyp_event_id++, id++)
+ event->id = hyp_event_id->id = id;
+}
+
int hyp_trace_init(void)
{
if (!is_protected_kvm_enabled())
return 0;
- return trace_remote_register("hypervisor", &trace_remote_callbacks, &trace_buffer, NULL, 0);
+ hyp_trace_init_events();
+
+ return trace_remote_register("hypervisor", &trace_remote_callbacks, &trace_buffer,
+ __hyp_events_start, __hyp_events_end - __hyp_events_start);
}
diff --git a/kernel/trace/trace_remote.c b/kernel/trace/trace_remote.c
index 1bc9c9a5197f..dc6bc387ebca 100644
--- a/kernel/trace/trace_remote.c
+++ b/kernel/trace/trace_remote.c
@@ -1037,7 +1037,7 @@ static int remote_event_format_show(struct seq_file *s, void *unused)
while (field->name) {
seq_printf(s, "\tfield:%s %s;\toffset:%zu;\tsize:%u;\tsigned:%d;\n",
field->type, field->name, offset, field->size,
- !field->is_signed);
+ field->is_signed);
offset += field->size;
field++;
}
@@ -1068,7 +1068,7 @@ static int remote_event_callback(const char *name, umode_t *mode, void **data,
if (!strcmp(name, "format")) {
*mode = TRACEFS_MODE_READ;
- *fops = &remote_event_id_fops;
+ *fops = &remote_event_format_fops;
return 1;
}
--
2.51.0.618.g983fd99d29-goog
^ permalink raw reply related [flat|nested] 33+ messages in thread* [PATCH v7 26/28] KVM: arm64: Add hyp_enter/hyp_exit events to pKVM hyp
2025-10-03 13:37 [PATCH v7 00/28] Tracefs support for pKVM Vincent Donnefort
` (24 preceding siblings ...)
2025-10-03 13:38 ` [PATCH v7 25/28] KVM: arm64: Add event support to the pKVM hyp and trace remote Vincent Donnefort
@ 2025-10-03 13:38 ` Vincent Donnefort
2025-10-03 13:38 ` [PATCH v7 27/28] KVM: arm64: Add selftest event support " Vincent Donnefort
2025-10-03 13:38 ` [PATCH v7 28/28] tracing: selftests: Add pKVM trace remote tests Vincent Donnefort
27 siblings, 0 replies; 33+ messages in thread
From: Vincent Donnefort @ 2025-10-03 13:38 UTC (permalink / raw)
To: rostedt, mhiramat, mathieu.desnoyers, linux-trace-kernel, maz,
oliver.upton, joey.gouly, suzuki.poulose, yuzenghui
Cc: kvmarm, linux-arm-kernel, jstultz, qperret, will, aneesh.kumar,
kernel-team, linux-kernel, Vincent Donnefort
The hyp_enter and hyp_exit events are logged by the hypervisor whenever
it is entered or exited.
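With tracing enabled, the remote's trace output is then expected to show
a matching pair of events around each stint at EL2, along the lines of
(output format illustrative):

  [000]   105.420003: hyp_enter:
  [000]   105.420010: hyp_exit: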
Signed-off-by: Vincent Donnefort <vdonnefort@google.com>
diff --git a/arch/arm64/include/asm/kvm_hypevents.h b/arch/arm64/include/asm/kvm_hypevents.h
index d6e033c96c52..ce3953bc884a 100644
--- a/arch/arm64/include/asm/kvm_hypevents.h
+++ b/arch/arm64/include/asm/kvm_hypevents.h
@@ -7,4 +7,21 @@
#include <nvhe/trace.h>
#endif
+HYP_EVENT(hyp_enter,
+ HE_PROTO(void),
+ HE_STRUCT(
+ ),
+ HE_ASSIGN(
+ ),
+ HE_PRINTK()
+);
+
+HYP_EVENT(hyp_exit,
+ HE_PROTO(void),
+ HE_STRUCT(
+ ),
+ HE_ASSIGN(
+ ),
+ HE_PRINTK()
+);
#endif
diff --git a/arch/arm64/kvm/hyp/include/nvhe/arm-smccc.h b/arch/arm64/kvm/hyp/include/nvhe/arm-smccc.h
new file mode 100644
index 000000000000..4b69d33e4f2d
--- /dev/null
+++ b/arch/arm64/kvm/hyp/include/nvhe/arm-smccc.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#include <asm/kvm_hypevents.h>
+
+#include <linux/arm-smccc.h>
+
+#undef arm_smccc_1_1_smc
+#define arm_smccc_1_1_smc(...) \
+ do { \
+ trace_hyp_exit(); \
+ __arm_smccc_1_1(SMCCC_SMC_INST, __VA_ARGS__); \
+ trace_hyp_enter(); \
+ } while (0)
diff --git a/arch/arm64/kvm/hyp/nvhe/ffa.c b/arch/arm64/kvm/hyp/nvhe/ffa.c
index 3369dd0c4009..e00931fd194f 100644
--- a/arch/arm64/kvm/hyp/nvhe/ffa.c
+++ b/arch/arm64/kvm/hyp/nvhe/ffa.c
@@ -26,10 +26,10 @@
* the duration and are therefore serialised.
*/
-#include <linux/arm-smccc.h>
#include <linux/arm_ffa.h>
#include <asm/kvm_pkvm.h>
+#include <nvhe/arm-smccc.h>
#include <nvhe/ffa.h>
#include <nvhe/mem_protect.h>
#include <nvhe/memory.h>
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index aebed41f7de7..f8361520967b 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -12,6 +12,7 @@
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>
#include <asm/kvm_hyp.h>
+#include <asm/kvm_hypevents.h>
#include <asm/kvm_mmu.h>
#include <nvhe/ffa.h>
@@ -716,7 +717,9 @@ static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
static void default_host_smc_handler(struct kvm_cpu_context *host_ctxt)
{
+ trace_hyp_exit();
__kvm_hyp_host_forward_smc(host_ctxt);
+ trace_hyp_enter();
}
static void handle_host_smc(struct kvm_cpu_context *host_ctxt)
@@ -740,6 +743,8 @@ void handle_trap(struct kvm_cpu_context *host_ctxt)
{
u64 esr = read_sysreg_el2(SYS_ESR);
+ trace_hyp_enter();
+
switch (ESR_ELx_EC(esr)) {
case ESR_ELx_EC_HVC64:
handle_host_hcall(host_ctxt);
@@ -754,4 +759,6 @@ void handle_trap(struct kvm_cpu_context *host_ctxt)
default:
BUG();
}
+
+ trace_hyp_exit();
}
diff --git a/arch/arm64/kvm/hyp/nvhe/psci-relay.c b/arch/arm64/kvm/hyp/nvhe/psci-relay.c
index c3e196fb8b18..64d1d418df1d 100644
--- a/arch/arm64/kvm/hyp/nvhe/psci-relay.c
+++ b/arch/arm64/kvm/hyp/nvhe/psci-relay.c
@@ -6,11 +6,12 @@
#include <asm/kvm_asm.h>
#include <asm/kvm_hyp.h>
+#include <asm/kvm_hypevents.h>
#include <asm/kvm_mmu.h>
-#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>
#include <uapi/linux/psci.h>
+#include <nvhe/arm-smccc.h>
#include <nvhe/memory.h>
#include <nvhe/trap_handler.h>
@@ -205,6 +206,7 @@ asmlinkage void __noreturn __kvm_host_psci_cpu_entry(bool is_cpu_on)
struct psci_boot_args *boot_args;
struct kvm_cpu_context *host_ctxt;
+ trace_hyp_enter();
host_ctxt = host_data_ptr(host_ctxt);
if (is_cpu_on)
@@ -221,6 +223,7 @@ asmlinkage void __noreturn __kvm_host_psci_cpu_entry(bool is_cpu_on)
write_sysreg_el1(INIT_SCTLR_EL1_MMU_OFF, SYS_SCTLR);
write_sysreg(INIT_PSTATE_EL1, SPSR_EL2);
+ trace_hyp_exit();
__host_enter(host_ctxt);
}
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index d3b9ec8a7c28..66ed30135815 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -7,7 +7,6 @@
#include <hyp/switch.h>
#include <hyp/sysreg-sr.h>
-#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/jump_label.h>
@@ -21,6 +20,7 @@
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
+#include <asm/kvm_hypevents.h>
#include <asm/kvm_mmu.h>
#include <asm/fpsimd.h>
#include <asm/debug-monitors.h>
@@ -308,10 +308,13 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
__debug_switch_to_guest(vcpu);
do {
+ trace_hyp_exit();
+
/* Jump in the fire! */
exit_code = __guest_enter(vcpu);
/* And we're baaack! */
+ trace_hyp_enter();
} while (fixup_guest_exit(vcpu, &exit_code));
__sysreg_save_state_nvhe(guest_ctxt);
--
2.51.0.618.g983fd99d29-goog
^ permalink raw reply related [flat|nested] 33+ messages in thread* [PATCH v7 27/28] KVM: arm64: Add selftest event support to pKVM hyp
2025-10-03 13:37 [PATCH v7 00/28] Tracefs support for pKVM Vincent Donnefort
` (25 preceding siblings ...)
2025-10-03 13:38 ` [PATCH v7 26/28] KVM: arm64: Add hyp_enter/hyp_exit events to pKVM hyp Vincent Donnefort
@ 2025-10-03 13:38 ` Vincent Donnefort
2025-10-03 13:38 ` [PATCH v7 28/28] tracing: selftests: Add pKVM trace remote tests Vincent Donnefort
27 siblings, 0 replies; 33+ messages in thread
From: Vincent Donnefort @ 2025-10-03 13:38 UTC (permalink / raw)
To: rostedt, mhiramat, mathieu.desnoyers, linux-trace-kernel, maz,
oliver.upton, joey.gouly, suzuki.poulose, yuzenghui
Cc: kvmarm, linux-arm-kernel, jstultz, qperret, will, aneesh.kumar,
kernel-team, linux-kernel, Vincent Donnefort
Add a selftest event that can be triggered from a `write_event` tracefs
file. This is intended to be used by the trace remote selftests.
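For example (the value written is arbitrary and ends up in the event's
id field; output format illustrative):

  # echo 42 > /sys/kernel/tracing/remotes/hypervisor/write_event
  # cat /sys/kernel/tracing/remotes/hypervisor/trace_pipe
  ... selftest: id=42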
Signed-off-by: Vincent Donnefort <vdonnefort@google.com>
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 79019e11f529..522cccef32b7 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -94,6 +94,7 @@ enum __kvm_host_smccc_func {
__KVM_HOST_SMCCC_FUNC___pkvm_reset_tracing,
__KVM_HOST_SMCCC_FUNC___pkvm_swap_reader_tracing,
__KVM_HOST_SMCCC_FUNC___pkvm_enable_event,
+ __KVM_HOST_SMCCC_FUNC___pkvm_write_event,
};
#define DECLARE_KVM_VHE_SYM(sym) extern char sym[]
diff --git a/arch/arm64/include/asm/kvm_hypevents.h b/arch/arm64/include/asm/kvm_hypevents.h
index ce3953bc884a..3d1244972869 100644
--- a/arch/arm64/include/asm/kvm_hypevents.h
+++ b/arch/arm64/include/asm/kvm_hypevents.h
@@ -24,4 +24,18 @@ HYP_EVENT(hyp_exit,
),
HE_PRINTK()
);
+
+#ifdef CONFIG_PKVM_SELFTESTS
+HYP_EVENT(selftest,
+ HE_PROTO(u64 id),
+ HE_STRUCT(
+ he_field(u64, id)
+ ),
+ HE_ASSIGN(
+ __entry->id = id;
+ ),
+ HE_PRINTK("id=%llu", __entry->id)
+);
#endif
+
+#endif /* __ARM64_KVM_HYPEVENTS_H_ */
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index e06358aaf1af..dc2ac3049428 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -45,6 +45,7 @@ menuconfig KVM
config NVHE_EL2_DEBUG
bool "Debug mode for non-VHE EL2 object"
depends on KVM
+ select PKVM_SELFTESTS
help
Say Y here to enable the debug mode for the non-VHE KVM EL2 object.
Failure reports will BUG() in the hypervisor. This is intended for
@@ -82,6 +83,15 @@ config PTDUMP_STAGE2_DEBUGFS
If in doubt, say N.
+config PKVM_SELFTESTS
+ bool "Protected KVM hypervisor selftests"
+ depends on KVM
+ default n
+ help
+ Say Y here to enable pKVM hypervisor testing infrastructure.
+
+ If unsure, say N.
+
config PKVM_TRACING
bool
depends on KVM
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index f8361520967b..81d8628a9047 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -631,6 +631,20 @@ static void handle___pkvm_enable_event(struct kvm_cpu_context *host_ctxt)
cpu_reg(host_ctxt, 1) = __pkvm_enable_event(id, enable);
}
+static void handle___pkvm_write_event(struct kvm_cpu_context *host_ctxt)
+{
+ int smc_ret = SMCCC_RET_NOT_SUPPORTED, ret = -EOPNOTSUPP;
+#ifdef CONFIG_PKVM_SELFTESTS
+ DECLARE_REG(u64, id, host_ctxt, 1);
+
+ trace_selftest(id);
+ smc_ret = SMCCC_RET_SUCCESS;
+ ret = 0;
+#endif
+ cpu_reg(host_ctxt, 0) = smc_ret;
+ cpu_reg(host_ctxt, 1) = ret;
+}
+
typedef void (*hcall_t)(struct kvm_cpu_context *);
#define HANDLE_FUNC(x) [__KVM_HOST_SMCCC_FUNC_##x] = (hcall_t)handle_##x
@@ -677,6 +691,7 @@ static const hcall_t host_hcall[] = {
HANDLE_FUNC(__pkvm_reset_tracing),
HANDLE_FUNC(__pkvm_swap_reader_tracing),
HANDLE_FUNC(__pkvm_enable_event),
+ HANDLE_FUNC(__pkvm_write_event),
};
static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
diff --git a/arch/arm64/kvm/hyp_trace.c b/arch/arm64/kvm/hyp_trace.c
index 73539f5b5e42..7d078f87f86d 100644
--- a/arch/arm64/kvm/hyp_trace.c
+++ b/arch/arm64/kvm/hyp_trace.c
@@ -318,8 +318,34 @@ static int hyp_trace_clock_show(struct seq_file *m, void *v)
}
DEFINE_SHOW_ATTRIBUTE(hyp_trace_clock);
+#ifdef CONFIG_PKVM_SELFTESTS
+static ssize_t hyp_trace_write_event_write(struct file *f, const char __user *ubuf,
+ size_t cnt, loff_t *pos)
+{
+ unsigned long val;
+ int ret;
+
+ ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
+ if (ret)
+ return ret;
+
+ ret = kvm_call_hyp_nvhe(__pkvm_write_event, val);
+ if (ret)
+ return ret;
+
+ return cnt;
+}
+
+static const struct file_operations hyp_trace_write_event_fops = {
+ .write = hyp_trace_write_event_write,
+};
+#endif
+
static int hyp_trace_init_tracefs(struct dentry *d, void *priv)
{
+#ifdef CONFIG_PKVM_SELFTESTS
+ tracefs_create_file("write_event", 0200, d, NULL, &hyp_trace_write_event_fops);
+#endif
return tracefs_create_file("trace_clock", 0440, d, NULL, &hyp_trace_clock_fops) ?
0 : -ENOMEM;
}
diff --git a/tools/testing/selftests/ftrace/test.d/remotes/pkvm/trace.tc b/tools/testing/selftests/ftrace/test.d/remotes/pkvm/trace.tc
new file mode 100644
index 000000000000..49dca7c3861a
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/remotes/pkvm/trace.tc
@@ -0,0 +1,10 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: Test pkvm hypervisor tracing pipe
+
+SOURCE_REMOTE_TEST=1
+. $TEST_DIR/remotes/trace_pipe.tc
+
+set -e
+setup_remote "hypervisor"
+test_trace
--
2.51.0.618.g983fd99d29-goog
^ permalink raw reply related [flat|nested] 33+ messages in thread* [PATCH v7 28/28] tracing: selftests: Add pKVM trace remote tests
2025-10-03 13:37 [PATCH v7 00/28] Tracefs support for pKVM Vincent Donnefort
` (26 preceding siblings ...)
2025-10-03 13:38 ` [PATCH v7 27/28] KVM: arm64: Add selftest event support " Vincent Donnefort
@ 2025-10-03 13:38 ` Vincent Donnefort
27 siblings, 0 replies; 33+ messages in thread
From: Vincent Donnefort @ 2025-10-03 13:38 UTC (permalink / raw)
To: rostedt, mhiramat, mathieu.desnoyers, linux-trace-kernel, maz,
oliver.upton, joey.gouly, suzuki.poulose, yuzenghui
Cc: kvmarm, linux-arm-kernel, jstultz, qperret, will, aneesh.kumar,
kernel-team, linux-kernel, Vincent Donnefort, Shuah Khan,
linux-kselftest
Run the trace remote selftests with the pKVM trace remote "hypervisor".
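For reference, the new tests are expected to run through the usual
ftrace selftest runner:

  # cd tools/testing/selftests/ftrace
  # ./ftracetest test.d/remotes/pkvm/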
Cc: Shuah Khan <skhan@linuxfoundation.org>
Cc: linux-kselftest@vger.kernel.org
Signed-off-by: Vincent Donnefort <vdonnefort@google.com>
diff --git a/tools/testing/selftests/ftrace/test.d/remotes/pkvm/buffer_size.tc b/tools/testing/selftests/ftrace/test.d/remotes/pkvm/buffer_size.tc
new file mode 100644
index 000000000000..2de07e4d72fe
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/remotes/pkvm/buffer_size.tc
@@ -0,0 +1,11 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: Test pkvm hypervisor trace buffer size
+# requires: remotes/hypervisor/write_event
+
+SOURCE_REMOTE_TEST=1
+. $TEST_DIR/remotes/buffer_size.tc
+
+set -e
+setup_remote "hypervisor"
+test_buffer_size
diff --git a/tools/testing/selftests/ftrace/test.d/remotes/pkvm/reset.tc b/tools/testing/selftests/ftrace/test.d/remotes/pkvm/reset.tc
new file mode 100644
index 000000000000..48afc51627e8
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/remotes/pkvm/reset.tc
@@ -0,0 +1,11 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: Test pkvm hypervisor trace buffer reset
+# requires: remotes/hypervisor/write_event
+
+SOURCE_REMOTE_TEST=1
+. $TEST_DIR/remotes/reset.tc
+
+set -e
+setup_remote "hypervisor"
+test_reset
diff --git a/tools/testing/selftests/ftrace/test.d/remotes/pkvm/trace.tc b/tools/testing/selftests/ftrace/test.d/remotes/pkvm/trace.tc
index 49dca7c3861a..00aed1c2e650 100644
--- a/tools/testing/selftests/ftrace/test.d/remotes/pkvm/trace.tc
+++ b/tools/testing/selftests/ftrace/test.d/remotes/pkvm/trace.tc
@@ -1,9 +1,10 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
-# description: Test pkvm hypervisor tracing pipe
+# description: Test pkvm hypervisor non-consuming trace read
+# requires: remotes/hypervisor/write_event
SOURCE_REMOTE_TEST=1
-. $TEST_DIR/remotes/trace_pipe.tc
+. $TEST_DIR/remotes/trace.tc
set -e
setup_remote "hypervisor"
diff --git a/tools/testing/selftests/ftrace/test.d/remotes/pkvm/trace_pipe.tc b/tools/testing/selftests/ftrace/test.d/remotes/pkvm/trace_pipe.tc
new file mode 100644
index 000000000000..b63339aca380
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/remotes/pkvm/trace_pipe.tc
@@ -0,0 +1,11 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: Test pkvm hypervisor consuming trace read
+# requires: remotes/hypervisor/write_event
+
+SOURCE_REMOTE_TEST=1
+. $TEST_DIR/remotes/trace_pipe.tc
+
+set -e
+setup_remote "hypervisor"
+test_trace_pipe
diff --git a/tools/testing/selftests/ftrace/test.d/remotes/pkvm/unloading.tc b/tools/testing/selftests/ftrace/test.d/remotes/pkvm/unloading.tc
new file mode 100644
index 000000000000..eb1640a927cc
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/remotes/pkvm/unloading.tc
@@ -0,0 +1,11 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: Test pkvm hypervisor trace buffer unloading
+# requires: remotes/hypervisor/write_event
+
+SOURCE_REMOTE_TEST=1
+. $TEST_DIR/remotes/unloading.tc
+
+set -e
+setup_remote "hypervisor"
+test_unloading
--
2.51.0.618.g983fd99d29-goog
^ permalink raw reply related [flat|nested] 33+ messages in thread