From: Pierrick Bouvier <pierrick.bouvier@linaro.org>
To: qemu-devel@nongnu.org
Cc: "Pierrick Bouvier" <pierrick.bouvier@linaro.org>,
"Alex Bennée" <alex.bennee@linaro.org>,
"Mahmoud Mandour" <ma.mandourr@gmail.com>,
"Richard Henderson" <richard.henderson@linaro.org>,
"Paolo Bonzini" <pbonzini@redhat.com>,
"Alexandre Iooss" <erdnaxe@crans.org>
Subject: [PATCH 03/12] tests/plugin: add test plugin for inline operations
Date: Thu, 11 Jan 2024 18:23:16 +0400 [thread overview]
Message-ID: <20240111142326.1743444-4-pierrick.bouvier@linaro.org> (raw)
In-Reply-To: <20240111142326.1743444-1-pierrick.bouvier@linaro.org>
For now, it simply performs instruction, bb and mem count, and ensures
that inline vs callback versions have the same result. Later, we'll
extend it when new inline operations are added.
Using existing plugins to test that everything works is a bit cumbersome, as
different events are handled in different plugins. Thus, this new one.
Signed-off-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
---
tests/plugin/inline.c | 183 +++++++++++++++++++++++++++++++++++++++
tests/plugin/meson.build | 2 +-
2 files changed, 184 insertions(+), 1 deletion(-)
create mode 100644 tests/plugin/inline.c
diff --git a/tests/plugin/inline.c b/tests/plugin/inline.c
new file mode 100644
index 00000000000..6114ebca545
--- /dev/null
+++ b/tests/plugin/inline.c
@@ -0,0 +1,183 @@
+/*
+ * Copyright (C) 2023, Pierrick Bouvier <pierrick.bouvier@linaro.org>
+ *
+ * Demonstrates and tests usage of inline ops.
+ *
+ * License: GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include <glib.h>
+#include <inttypes.h>
+#include <stdint.h>
+#include <stdio.h>
+
+#include <qemu-plugin.h>
+
+/* Fixed capacity of the per-vcpu arrays below. */
+#define MAX_CPUS 8
+
+/*
+ * For each event type (tb, insn, mem) four counters are kept:
+ *  - count_*: global total, bumped from a callback under the matching lock
+ *    (this is the reference value);
+ *  - count_*_per_vcpu: callback-maintained, one slot per vcpu;
+ *  - count_*_inline_per_vcpu: maintained by the per-vcpu inline op;
+ *  - count_*_inline_racy: a single counter shared by all vcpus and updated
+ *    by a non-atomic inline op, so increments may be lost; it is only
+ *    asserted to be <= the reference value.
+ */
+static uint64_t count_tb;
+static uint64_t count_tb_per_vcpu[MAX_CPUS];
+static uint64_t count_tb_inline_per_vcpu[MAX_CPUS];
+static uint64_t count_tb_inline_racy;
+static uint64_t count_insn;
+static uint64_t count_insn_per_vcpu[MAX_CPUS];
+static uint64_t count_insn_inline_per_vcpu[MAX_CPUS];
+static uint64_t count_insn_inline_racy;
+static uint64_t count_mem;
+static uint64_t count_mem_per_vcpu[MAX_CPUS];
+static uint64_t count_mem_inline_per_vcpu[MAX_CPUS];
+static uint64_t count_mem_inline_racy;
+/* One lock per global counter; per-vcpu slots have a single writer each. */
+static GMutex tb_lock;
+static GMutex insn_lock;
+static GMutex mem_lock;
+
+QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;
+
+/* Sum one per-vcpu counter array into a single total. */
+static uint64_t collect_per_vcpu(uint64_t *values)
+{
+ uint64_t count = 0;
+ for (int i = 0; i < MAX_CPUS; ++i) {
+ count += values[i];
+ }
+ return count;
+}
+
+/*
+ * Instruction counts: the locked callback total is the reference; the
+ * per-vcpu callback sum and per-vcpu inline sum must match it exactly,
+ * while the non-atomic shared inline counter may only undercount.
+ */
+static void stats_insn(void)
+{
+ const uint64_t expected = count_insn;
+ const uint64_t per_vcpu = collect_per_vcpu(count_insn_per_vcpu);
+ const uint64_t inl_per_vcpu = collect_per_vcpu(count_insn_inline_per_vcpu);
+ printf("insn: %" PRIu64 "\n", expected);
+ printf("insn: %" PRIu64 " (per vcpu)\n", per_vcpu);
+ printf("insn: %" PRIu64 " (per vcpu inline)\n", inl_per_vcpu);
+ printf("insn: %" PRIu64 " (inline racy)\n", count_insn_inline_racy);
+ g_assert(expected > 0);
+ g_assert(per_vcpu == expected);
+ g_assert(inl_per_vcpu == expected);
+ g_assert(count_insn_inline_racy <= expected);
+}
+
+/*
+ * TB counts: same scheme as stats_insn — callback total is the reference,
+ * both per-vcpu sums must equal it, the racy inline counter may be lower.
+ */
+static void stats_tb(void)
+{
+ const uint64_t expected = count_tb;
+ const uint64_t per_vcpu = collect_per_vcpu(count_tb_per_vcpu);
+ const uint64_t inl_per_vcpu = collect_per_vcpu(count_tb_inline_per_vcpu);
+ printf("tb: %" PRIu64 "\n", expected);
+ printf("tb: %" PRIu64 " (per vcpu)\n", per_vcpu);
+ printf("tb: %" PRIu64 " (per vcpu inline)\n", inl_per_vcpu);
+ printf("tb: %" PRIu64 " (inline racy)\n", count_tb_inline_racy);
+ g_assert(expected > 0);
+ g_assert(per_vcpu == expected);
+ g_assert(inl_per_vcpu == expected);
+ g_assert(count_tb_inline_racy <= expected);
+}
+
+/*
+ * Memory-access counts: same scheme as stats_insn — callback total is the
+ * reference, both per-vcpu sums must equal it, the racy inline counter may
+ * be lower.
+ */
+static void stats_mem(void)
+{
+ const uint64_t expected = count_mem;
+ const uint64_t per_vcpu = collect_per_vcpu(count_mem_per_vcpu);
+ const uint64_t inl_per_vcpu = collect_per_vcpu(count_mem_inline_per_vcpu);
+ printf("mem: %" PRIu64 "\n", expected);
+ printf("mem: %" PRIu64 " (per vcpu)\n", per_vcpu);
+ printf("mem: %" PRIu64 " (per vcpu inline)\n", inl_per_vcpu);
+ printf("mem: %" PRIu64 " (inline racy)\n", count_mem_inline_racy);
+ g_assert(expected > 0);
+ g_assert(per_vcpu == expected);
+ g_assert(inl_per_vcpu == expected);
+ g_assert(count_mem_inline_racy <= expected);
+}
+
+/*
+ * atexit callback: for every vcpu slot, print the (callback, inline) pair
+ * per event type and assert they agree, then validate the aggregated
+ * totals for each event type.
+ */
+static void plugin_exit(qemu_plugin_id_t id, void *udata)
+{
+ for (int i = 0; i < MAX_CPUS; ++i) {
+ const uint64_t tb = count_tb_per_vcpu[i];
+ const uint64_t tb_inline = count_tb_inline_per_vcpu[i];
+ const uint64_t insn = count_insn_per_vcpu[i];
+ const uint64_t insn_inline = count_insn_inline_per_vcpu[i];
+ const uint64_t mem = count_mem_per_vcpu[i];
+ const uint64_t mem_inline = count_mem_inline_per_vcpu[i];
+ printf("cpu %d: tb (%" PRIu64 ", %" PRIu64 ") | "
+ "insn (%" PRIu64 ", %" PRIu64 ") | "
+ "mem (%" PRIu64 ", %" PRIu64 ")"
+ "\n",
+ i, tb, tb_inline, insn, insn_inline, mem, mem_inline);
+ /* Inline per-vcpu ops must count exactly like the callbacks. */
+ g_assert(tb == tb_inline);
+ g_assert(insn == insn_inline);
+ g_assert(mem == mem_inline);
+ }
+
+ stats_tb();
+ stats_insn();
+ stats_mem();
+}
+
+/*
+ * TB-execution callback: the per-vcpu slot has a single writer, so no
+ * locking is needed; the shared total is protected by tb_lock.
+ */
+static void vcpu_tb_exec(unsigned int cpu_index, void *udata)
+{
+ count_tb_per_vcpu[cpu_index]++;
+ g_mutex_lock(&tb_lock);
+ count_tb++;
+ g_mutex_unlock(&tb_lock);
+}
+
+/*
+ * Instruction-execution callback: per-vcpu slot is lock-free (single
+ * writer); the shared total is protected by insn_lock.
+ */
+static void vcpu_insn_exec(unsigned int cpu_index, void *udata)
+{
+ count_insn_per_vcpu[cpu_index]++;
+ g_mutex_lock(&insn_lock);
+ count_insn++;
+ g_mutex_unlock(&insn_lock);
+}
+
+/*
+ * Memory-access callback: per-vcpu slot is lock-free (single writer);
+ * the shared total is protected by mem_lock.
+ */
+static void vcpu_mem_access(unsigned int cpu_index,
+ qemu_plugin_meminfo_t info,
+ uint64_t vaddr,
+ void *userdata)
+{
+ count_mem_per_vcpu[cpu_index]++;
+ g_mutex_lock(&mem_lock);
+ count_mem++;
+ g_mutex_unlock(&mem_lock);
+}
+
+/*
+ * Translation-time hook: for the TB itself, for each instruction, and for
+ * each memory access, register three counters — a counting callback, a
+ * shared (racy) inline ADD_U64, and a per-vcpu inline ADD_U64 — so their
+ * results can be cross-checked at exit.
+ */
+static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
+{
+ qemu_plugin_register_vcpu_tb_exec_cb(tb, vcpu_tb_exec,
+ QEMU_PLUGIN_CB_NO_REGS, 0);
+ qemu_plugin_register_vcpu_tb_exec_inline(tb, QEMU_PLUGIN_INLINE_ADD_U64,
+ &count_tb_inline_racy, 1);
+ qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu(
+ tb, QEMU_PLUGIN_INLINE_ADD_U64,
+ count_tb_inline_per_vcpu, sizeof(uint64_t), 1);
+
+ for (int idx = 0; idx < qemu_plugin_tb_n_insns(tb); ++idx) {
+ struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, idx);
+ qemu_plugin_register_vcpu_insn_exec_cb(insn, vcpu_insn_exec,
+ QEMU_PLUGIN_CB_NO_REGS, 0);
+ qemu_plugin_register_vcpu_insn_exec_inline(
+ insn, QEMU_PLUGIN_INLINE_ADD_U64,
+ &count_insn_inline_racy, 1);
+ qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu(
+ insn, QEMU_PLUGIN_INLINE_ADD_U64,
+ count_insn_inline_per_vcpu, sizeof(uint64_t), 1);
+ qemu_plugin_register_vcpu_mem_cb(insn, &vcpu_mem_access,
+ QEMU_PLUGIN_CB_NO_REGS,
+ QEMU_PLUGIN_MEM_RW, 0);
+ qemu_plugin_register_vcpu_mem_inline(insn, QEMU_PLUGIN_MEM_RW,
+ QEMU_PLUGIN_INLINE_ADD_U64,
+ &count_mem_inline_racy, 1);
+ qemu_plugin_register_vcpu_mem_inline_per_vcpu(
+ insn, QEMU_PLUGIN_MEM_RW,
+ QEMU_PLUGIN_INLINE_ADD_U64,
+ count_mem_inline_per_vcpu, sizeof(uint64_t), 1);
+ }
+}
+
+/*
+ * Plugin entry point: check the vcpu count fits the fixed-size arrays and
+ * install the translation and exit callbacks. Always returns 0 (success).
+ */
+QEMU_PLUGIN_EXPORT
+int qemu_plugin_install(qemu_plugin_id_t id, const qemu_info_t *info,
+                        int argc, char **argv)
+{
+    /*
+     * The per-vcpu arrays can only track MAX_CPUS vcpus. info->system is
+     * only valid in full-system emulation (see qemu-plugin.h); reading it
+     * in user-mode would inspect an unset union member, so gate the check
+     * on info->system_emulation.
+     */
+    if (info->system_emulation) {
+        g_assert(info->system.smp_vcpus <= MAX_CPUS);
+    }
+    qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans);
+    qemu_plugin_register_atexit_cb(id, plugin_exit, NULL);
+
+    return 0;
+}
diff --git a/tests/plugin/meson.build b/tests/plugin/meson.build
index e18183aaeda..9eece5bab51 100644
--- a/tests/plugin/meson.build
+++ b/tests/plugin/meson.build
@@ -1,6 +1,6 @@
t = []
if get_option('plugins')
- foreach i : ['bb', 'empty', 'insn', 'mem', 'syscall']
+ foreach i : ['bb', 'empty', 'inline', 'insn', 'mem', 'syscall']
if host_os == 'windows'
t += shared_module(i, files(i + '.c') + '../../contrib/plugins/win32_linker.c',
include_directories: '../../include/qemu',
--
2.43.0
next prev parent reply other threads:[~2024-01-11 14:25 UTC|newest]
Thread overview: 35+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-01-11 14:23 [PATCH 00/12] TCG Plugin inline operation enhancement Pierrick Bouvier
2024-01-11 14:23 ` [PATCH 01/12] plugins: implement inline operation with cpu_index offset Pierrick Bouvier
2024-01-11 22:04 ` Richard Henderson
2024-01-12 14:27 ` Pierrick Bouvier
2024-01-12 22:22 ` Richard Henderson
2024-01-11 14:23 ` [PATCH 02/12] plugins: add inline operation per vcpu Pierrick Bouvier
2024-01-11 22:08 ` Richard Henderson
2024-01-11 14:23 ` Pierrick Bouvier [this message]
2024-01-11 15:57 ` [PATCH 03/12] tests/plugin: add test plugin for inline operations Philippe Mathieu-Daudé
2024-01-11 17:20 ` Pierrick Bouvier
2024-01-12 17:20 ` Alex Bennée
2024-01-13 5:16 ` Pierrick Bouvier
2024-01-13 17:16 ` Alex Bennée
2024-01-15 7:06 ` Pierrick Bouvier
2024-01-15 9:04 ` Alex Bennée
2024-01-16 7:46 ` Pierrick Bouvier
2024-01-11 14:23 ` [PATCH 04/12] tests/plugin/inline: migrate to new per_vcpu API Pierrick Bouvier
2024-01-11 22:10 ` Richard Henderson
2024-01-12 3:51 ` Pierrick Bouvier
2024-01-12 8:40 ` Richard Henderson
2024-01-12 8:58 ` Pierrick Bouvier
2024-01-11 14:23 ` [PATCH 05/12] tests/plugin/mem: fix race condition with callbacks Pierrick Bouvier
2024-01-11 22:12 ` Richard Henderson
2024-01-11 14:23 ` [PATCH 06/12] tests/plugin/mem: migrate to new per_vcpu API Pierrick Bouvier
2024-01-11 14:23 ` [PATCH 07/12] tests/plugin/insn: " Pierrick Bouvier
2024-01-11 22:14 ` Richard Henderson
2024-01-11 14:23 ` [PATCH 08/12] tests/plugin/bb: " Pierrick Bouvier
2024-01-11 22:15 ` Richard Henderson
2024-01-11 14:23 ` [PATCH 09/12] contrib/plugins/hotblocks: " Pierrick Bouvier
2024-01-12 8:42 ` Richard Henderson
2024-01-11 14:23 ` [PATCH 10/12] contrib/plugins/howvec: " Pierrick Bouvier
2024-01-12 8:44 ` Richard Henderson
2024-01-11 14:23 ` [PATCH 11/12] plugins: remove non per_vcpu inline operation from API Pierrick Bouvier
2024-01-11 14:23 ` [PATCH 12/12] MAINTAINERS: Add myself as reviewer for TCG Plugins Pierrick Bouvier
2024-01-12 15:53 ` Philippe Mathieu-Daudé
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20240111142326.1743444-4-pierrick.bouvier@linaro.org \
--to=pierrick.bouvier@linaro.org \
--cc=alex.bennee@linaro.org \
--cc=erdnaxe@crans.org \
--cc=ma.mandourr@gmail.com \
--cc=pbonzini@redhat.com \
--cc=qemu-devel@nongnu.org \
--cc=richard.henderson@linaro.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).