From: Pierrick Bouvier <pierrick.bouvier@linaro.org>
To: Richard Henderson <richard.henderson@linaro.org>, qemu-devel@nongnu.org
Cc: "Alex Bennée" <alex.bennee@linaro.org>,
	"Mahmoud Mandour" <ma.mandourr@gmail.com>,
	"Paolo Bonzini" <pbonzini@redhat.com>,
	"Alexandre Iooss" <erdnaxe@crans.org>
Subject: Re: [PATCH 04/12] tests/plugin/inline: migrate to new per_vcpu API
Date: Fri, 12 Jan 2024 12:58:27 +0400
Message-ID: <994ee352-3e4b-4ac3-996f-2c190b5685d9@linaro.org>
In-Reply-To: <b495146f-c523-4a97-b703-12b226251af5@linaro.org>

On 1/12/24 12:40, Richard Henderson wrote:
> On 1/12/24 14:51, Pierrick Bouvier wrote:
>> On 1/12/24 02:10, Richard Henderson wrote:
>>> On 1/12/24 01:23, Pierrick Bouvier wrote:
>>>> Signed-off-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
>>>> ---
>>>>     tests/plugin/inline.c | 17 -----------------
>>>>     1 file changed, 17 deletions(-)
>>>
>>> Was this supposed to be together with patch 6?
>>>
>>
>> My goal was to have a version that still uses the original API.
>> If you prefer this to be squashed, no problem doing it.
> 
> My confusion is that this patch does not "migrate" anything -- it only removes code.  Is
> it just that the description is inaccurate?  But it appears that the combination of 4+6
> would "migrate" to the new API.
> 

You're right, the commit message is incorrect, as this patch only removes 
the use of the old API. Having it as a separate commit does not add any 
value to the series, so I'll simply squash it into the previous one.
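
For reference, here is the core of what 4+6 give us once squashed, reduced
to the tb counter (a sketch based on the registrations visible in the diff
below; MAX_CPUS and the surrounding plugin boilerplate are the ones already
in tests/plugin/inline.c):

    /* Before: a single accumulator shared by all vCPUs; concurrent
     * inline additions can race and lose increments. */
    static uint64_t count_tb_inline_racy;

    qemu_plugin_register_vcpu_tb_exec_inline(tb, QEMU_PLUGIN_INLINE_ADD_U64,
                                             &count_tb_inline_racy, 1);

    /* After: one counter slot per vCPU; each vCPU adds into
     * count_tb_inline_per_vcpu[cpu_index], with sizeof(uint64_t) as the
     * stride between slots, so no two vCPUs share a counter. */
    static uint64_t count_tb_inline_per_vcpu[MAX_CPUS];

    qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu(
        tb, QEMU_PLUGIN_INLINE_ADD_U64,
        count_tb_inline_per_vcpu, sizeof(uint64_t), 1);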

> 
> r~
> 
>>
>>> r~
>>>
>>>>
>>>> diff --git a/tests/plugin/inline.c b/tests/plugin/inline.c
>>>> index 6114ebca545..ae59f7af7a7 100644
>>>> --- a/tests/plugin/inline.c
>>>> +++ b/tests/plugin/inline.c
>>>> @@ -18,15 +18,12 @@
>>>>     static uint64_t count_tb;
>>>>     static uint64_t count_tb_per_vcpu[MAX_CPUS];
>>>>     static uint64_t count_tb_inline_per_vcpu[MAX_CPUS];
>>>> -static uint64_t count_tb_inline_racy;
>>>>     static uint64_t count_insn;
>>>>     static uint64_t count_insn_per_vcpu[MAX_CPUS];
>>>>     static uint64_t count_insn_inline_per_vcpu[MAX_CPUS];
>>>> -static uint64_t count_insn_inline_racy;
>>>>     static uint64_t count_mem;
>>>>     static uint64_t count_mem_per_vcpu[MAX_CPUS];
>>>>     static uint64_t count_mem_inline_per_vcpu[MAX_CPUS];
>>>> -static uint64_t count_mem_inline_racy;
>>>>     static GMutex tb_lock;
>>>>     static GMutex insn_lock;
>>>>     static GMutex mem_lock;
>>>> @@ -50,11 +47,9 @@ static void stats_insn(void)
>>>>         printf("insn: %" PRIu64 "\n", expected);
>>>>         printf("insn: %" PRIu64 " (per vcpu)\n", per_vcpu);
>>>>         printf("insn: %" PRIu64 " (per vcpu inline)\n", inl_per_vcpu);
>>>> -    printf("insn: %" PRIu64 " (inline racy)\n", count_insn_inline_racy);
>>>>         g_assert(expected > 0);
>>>>         g_assert(per_vcpu == expected);
>>>>         g_assert(inl_per_vcpu == expected);
>>>> -    g_assert(count_insn_inline_racy <= expected);
>>>>     }
>>>>     static void stats_tb(void)
>>>> @@ -65,11 +60,9 @@ static void stats_tb(void)
>>>>         printf("tb: %" PRIu64 "\n", expected);
>>>>         printf("tb: %" PRIu64 " (per vcpu)\n", per_vcpu);
>>>>         printf("tb: %" PRIu64 " (per vcpu inline)\n", inl_per_vcpu);
>>>> -    printf("tb: %" PRIu64 " (inline racy)\n", count_tb_inline_racy);
>>>>         g_assert(expected > 0);
>>>>         g_assert(per_vcpu == expected);
>>>>         g_assert(inl_per_vcpu == expected);
>>>> -    g_assert(count_tb_inline_racy <= expected);
>>>>     }
>>>>     static void stats_mem(void)
>>>> @@ -80,11 +73,9 @@ static void stats_mem(void)
>>>>         printf("mem: %" PRIu64 "\n", expected);
>>>>         printf("mem: %" PRIu64 " (per vcpu)\n", per_vcpu);
>>>>         printf("mem: %" PRIu64 " (per vcpu inline)\n", inl_per_vcpu);
>>>> -    printf("mem: %" PRIu64 " (inline racy)\n", count_mem_inline_racy);
>>>>         g_assert(expected > 0);
>>>>         g_assert(per_vcpu == expected);
>>>>         g_assert(inl_per_vcpu == expected);
>>>> -    g_assert(count_mem_inline_racy <= expected);
>>>>     }
>>>>     static void plugin_exit(qemu_plugin_id_t id, void *udata)
>>>> @@ -142,8 +133,6 @@ static void vcpu_tb_trans(qemu_plugin_id_t id, struct
>>>> qemu_plugin_tb *tb)
>>>>     {
>>>>         qemu_plugin_register_vcpu_tb_exec_cb(tb, vcpu_tb_exec,
>>>>                                              QEMU_PLUGIN_CB_NO_REGS, 0);
>>>> -    qemu_plugin_register_vcpu_tb_exec_inline(tb, QEMU_PLUGIN_INLINE_ADD_U64,
>>>> -                                             &count_tb_inline_racy, 1);
>>>>         qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu(
>>>>             tb, QEMU_PLUGIN_INLINE_ADD_U64,
>>>>             count_tb_inline_per_vcpu, sizeof(uint64_t), 1);
>>>> @@ -152,18 +141,12 @@ static void vcpu_tb_trans(qemu_plugin_id_t id, struct
>>>> qemu_plugin_tb *tb)
>>>>             struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, idx);
>>>>             qemu_plugin_register_vcpu_insn_exec_cb(insn, vcpu_insn_exec,
>>>>                                                    QEMU_PLUGIN_CB_NO_REGS, 0);
>>>> -        qemu_plugin_register_vcpu_insn_exec_inline(
>>>> -            insn, QEMU_PLUGIN_INLINE_ADD_U64,
>>>> -            &count_insn_inline_racy, 1);
>>>>             qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu(
>>>>                 insn, QEMU_PLUGIN_INLINE_ADD_U64,
>>>>                 count_insn_inline_per_vcpu, sizeof(uint64_t), 1);
>>>>             qemu_plugin_register_vcpu_mem_cb(insn, &vcpu_mem_access,
>>>>                                              QEMU_PLUGIN_CB_NO_REGS,
>>>>                                              QEMU_PLUGIN_MEM_RW, 0);
>>>> -        qemu_plugin_register_vcpu_mem_inline(insn, QEMU_PLUGIN_MEM_RW,
>>>> -                                             QEMU_PLUGIN_INLINE_ADD_U64,
>>>> -                                             &count_mem_inline_racy, 1);
>>>>             qemu_plugin_register_vcpu_mem_inline_per_vcpu(
>>>>                 insn, QEMU_PLUGIN_MEM_RW,
>>>>                 QEMU_PLUGIN_INLINE_ADD_U64,
>>>
> 

Thread overview: 35+ messages
2024-01-11 14:23 [PATCH 00/12] TCG Plugin inline operation enhancement Pierrick Bouvier
2024-01-11 14:23 ` [PATCH 01/12] plugins: implement inline operation with cpu_index offset Pierrick Bouvier
2024-01-11 22:04   ` Richard Henderson
2024-01-12 14:27     ` Pierrick Bouvier
2024-01-12 22:22       ` Richard Henderson
2024-01-11 14:23 ` [PATCH 02/12] plugins: add inline operation per vcpu Pierrick Bouvier
2024-01-11 22:08   ` Richard Henderson
2024-01-11 14:23 ` [PATCH 03/12] tests/plugin: add test plugin for inline operations Pierrick Bouvier
2024-01-11 15:57   ` Philippe Mathieu-Daudé
2024-01-11 17:20     ` Pierrick Bouvier
2024-01-12 17:20       ` Alex Bennée
2024-01-13  5:16         ` Pierrick Bouvier
2024-01-13 17:16           ` Alex Bennée
2024-01-15  7:06             ` Pierrick Bouvier
2024-01-15  9:04               ` Alex Bennée
2024-01-16  7:46                 ` Pierrick Bouvier
2024-01-11 14:23 ` [PATCH 04/12] tests/plugin/inline: migrate to new per_vcpu API Pierrick Bouvier
2024-01-11 22:10   ` Richard Henderson
2024-01-12  3:51     ` Pierrick Bouvier
2024-01-12  8:40       ` Richard Henderson
2024-01-12  8:58         ` Pierrick Bouvier [this message]
2024-01-11 14:23 ` [PATCH 05/12] tests/plugin/mem: fix race condition with callbacks Pierrick Bouvier
2024-01-11 22:12   ` Richard Henderson
2024-01-11 14:23 ` [PATCH 06/12] tests/plugin/mem: migrate to new per_vcpu API Pierrick Bouvier
2024-01-11 14:23 ` [PATCH 07/12] tests/plugin/insn: " Pierrick Bouvier
2024-01-11 22:14   ` Richard Henderson
2024-01-11 14:23 ` [PATCH 08/12] tests/plugin/bb: " Pierrick Bouvier
2024-01-11 22:15   ` Richard Henderson
2024-01-11 14:23 ` [PATCH 09/12] contrib/plugins/hotblocks: " Pierrick Bouvier
2024-01-12  8:42   ` Richard Henderson
2024-01-11 14:23 ` [PATCH 10/12] contrib/plugins/howvec: " Pierrick Bouvier
2024-01-12  8:44   ` Richard Henderson
2024-01-11 14:23 ` [PATCH 11/12] plugins: remove non per_vcpu inline operation from API Pierrick Bouvier
2024-01-11 14:23 ` [PATCH 12/12] MAINTAINERS: Add myself as reviewer for TCG Plugins Pierrick Bouvier
2024-01-12 15:53   ` Philippe Mathieu-Daudé
