From: Pierrick Bouvier <pierrick.bouvier@linaro.org>
To: qemu-devel@nongnu.org
Cc: "Pierrick Bouvier" <pierrick.bouvier@linaro.org>,
	"Alex Bennée" <alex.bennee@linaro.org>,
	"Mahmoud Mandour" <ma.mandourr@gmail.com>,
	"Richard Henderson" <richard.henderson@linaro.org>,
	"Paolo Bonzini" <pbonzini@redhat.com>,
	"Alexandre Iooss" <erdnaxe@crans.org>
Subject: [PATCH 04/12] tests/plugin/inline: migrate to new per_vcpu API
Date: Thu, 11 Jan 2024 18:23:17 +0400
Message-ID: <20240111142326.1743444-5-pierrick.bouvier@linaro.org>
In-Reply-To: <20240111142326.1743444-1-pierrick.bouvier@linaro.org>
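
Use the new per-vcpu inline API for the tb, insn and mem counters,
and drop the racy global counters (count_*_inline_racy) along with
the assertions that could only check a lower bound on their values.

For illustration only (not part of the applied diff), a minimal
sketch of the per-vcpu pattern this test now exercises: each vcpu
increments its own slot, and totals are obtained by summing the
slots, so no lock is needed on the hot path. The fixed MAX_CPUS
sizing and the sizeof(uint64_t) element-size argument are
assumptions matching the calls visible in the diff below.

    #include <stdint.h>
    #include <qemu-plugin.h>

    #define MAX_CPUS 8 /* assumption: fixed upper bound, as in the test */

    static uint64_t count_tb_inline_per_vcpu[MAX_CPUS];

    static void tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
    {
        /*
         * Each executing vcpu adds 1 to count_tb_inline_per_vcpu[cpu_index];
         * slots are not shared between vcpus, so no locking is required.
         */
        qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu(
            tb, QEMU_PLUGIN_INLINE_ADD_U64,
            count_tb_inline_per_vcpu, sizeof(uint64_t), 1);
    }

    static uint64_t tb_total(void)
    {
        /* sum the per-vcpu slots to recover the global count */
        uint64_t sum = 0;
        for (unsigned i = 0; i < MAX_CPUS; ++i) {
            sum += count_tb_inline_per_vcpu[i];
        }
        return sum;
    }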

Signed-off-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
---
 tests/plugin/inline.c | 17 -----------------
 1 file changed, 17 deletions(-)

diff --git a/tests/plugin/inline.c b/tests/plugin/inline.c
index 6114ebca545..ae59f7af7a7 100644
--- a/tests/plugin/inline.c
+++ b/tests/plugin/inline.c
@@ -18,15 +18,12 @@
 static uint64_t count_tb;
 static uint64_t count_tb_per_vcpu[MAX_CPUS];
 static uint64_t count_tb_inline_per_vcpu[MAX_CPUS];
-static uint64_t count_tb_inline_racy;
 static uint64_t count_insn;
 static uint64_t count_insn_per_vcpu[MAX_CPUS];
 static uint64_t count_insn_inline_per_vcpu[MAX_CPUS];
-static uint64_t count_insn_inline_racy;
 static uint64_t count_mem;
 static uint64_t count_mem_per_vcpu[MAX_CPUS];
 static uint64_t count_mem_inline_per_vcpu[MAX_CPUS];
-static uint64_t count_mem_inline_racy;
 static GMutex tb_lock;
 static GMutex insn_lock;
 static GMutex mem_lock;
@@ -50,11 +47,9 @@ static void stats_insn(void)
     printf("insn: %" PRIu64 "\n", expected);
     printf("insn: %" PRIu64 " (per vcpu)\n", per_vcpu);
     printf("insn: %" PRIu64 " (per vcpu inline)\n", inl_per_vcpu);
-    printf("insn: %" PRIu64 " (inline racy)\n", count_insn_inline_racy);
     g_assert(expected > 0);
     g_assert(per_vcpu == expected);
     g_assert(inl_per_vcpu == expected);
-    g_assert(count_insn_inline_racy <= expected);
 }
 
 static void stats_tb(void)
@@ -65,11 +60,9 @@ static void stats_tb(void)
     printf("tb: %" PRIu64 "\n", expected);
     printf("tb: %" PRIu64 " (per vcpu)\n", per_vcpu);
     printf("tb: %" PRIu64 " (per vcpu inline)\n", inl_per_vcpu);
-    printf("tb: %" PRIu64 " (inline racy)\n", count_tb_inline_racy);
     g_assert(expected > 0);
     g_assert(per_vcpu == expected);
     g_assert(inl_per_vcpu == expected);
-    g_assert(count_tb_inline_racy <= expected);
 }
 
 static void stats_mem(void)
@@ -80,11 +73,9 @@ static void stats_mem(void)
     printf("mem: %" PRIu64 "\n", expected);
     printf("mem: %" PRIu64 " (per vcpu)\n", per_vcpu);
     printf("mem: %" PRIu64 " (per vcpu inline)\n", inl_per_vcpu);
-    printf("mem: %" PRIu64 " (inline racy)\n", count_mem_inline_racy);
     g_assert(expected > 0);
     g_assert(per_vcpu == expected);
     g_assert(inl_per_vcpu == expected);
-    g_assert(count_mem_inline_racy <= expected);
 }
 
 static void plugin_exit(qemu_plugin_id_t id, void *udata)
@@ -142,8 +133,6 @@ static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
 {
     qemu_plugin_register_vcpu_tb_exec_cb(tb, vcpu_tb_exec,
                                          QEMU_PLUGIN_CB_NO_REGS, 0);
-    qemu_plugin_register_vcpu_tb_exec_inline(tb, QEMU_PLUGIN_INLINE_ADD_U64,
-                                             &count_tb_inline_racy, 1);
     qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu(
         tb, QEMU_PLUGIN_INLINE_ADD_U64,
         count_tb_inline_per_vcpu, sizeof(uint64_t), 1);
@@ -152,18 +141,12 @@ static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
         struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, idx);
         qemu_plugin_register_vcpu_insn_exec_cb(insn, vcpu_insn_exec,
                                                QEMU_PLUGIN_CB_NO_REGS, 0);
-        qemu_plugin_register_vcpu_insn_exec_inline(
-            insn, QEMU_PLUGIN_INLINE_ADD_U64,
-            &count_insn_inline_racy, 1);
         qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu(
             insn, QEMU_PLUGIN_INLINE_ADD_U64,
             count_insn_inline_per_vcpu, sizeof(uint64_t), 1);
         qemu_plugin_register_vcpu_mem_cb(insn, &vcpu_mem_access,
                                          QEMU_PLUGIN_CB_NO_REGS,
                                          QEMU_PLUGIN_MEM_RW, 0);
-        qemu_plugin_register_vcpu_mem_inline(insn, QEMU_PLUGIN_MEM_RW,
-                                             QEMU_PLUGIN_INLINE_ADD_U64,
-                                             &count_mem_inline_racy, 1);
         qemu_plugin_register_vcpu_mem_inline_per_vcpu(
             insn, QEMU_PLUGIN_MEM_RW,
             QEMU_PLUGIN_INLINE_ADD_U64,
-- 
2.43.0


