From: Richard Henderson <richard.henderson@linaro.org>
To: "Alex Bennée" <alex.bennee@linaro.org>
Cc: "Philippe Mathieu-Daudé" <f4bug@amsat.org>, qemu-devel@nongnu.org
Subject: Re: [PATCH v5 14/17] include/hw/core: Create struct CPUJumpCache
Date: Thu, 29 Sep 2022 09:22:07 -0700	[thread overview]
Message-ID: <9db697ee-f8e1-1388-7675-42c46ae98fca@linaro.org> (raw)
In-Reply-To: <87k05mz3xa.fsf@linaro.org>

On 9/29/22 06:46, Alex Bennée wrote:
> 
> Richard Henderson <richard.henderson@linaro.org> writes:
> 
>> Wrap the bare TranslationBlock pointer into a structure.
>>
>> Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
>> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
>> ---
>>   include/hw/core/cpu.h     | 8 ++++++--
>>   accel/tcg/cpu-exec.c      | 9 ++++++---
>>   accel/tcg/cputlb.c        | 2 +-
>>   accel/tcg/translate-all.c | 4 ++--
>>   4 files changed, 15 insertions(+), 8 deletions(-)
>>
>> diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
>> index 9e47184513..ee5b75dea0 100644
>> --- a/include/hw/core/cpu.h
>> +++ b/include/hw/core/cpu.h
>> @@ -232,6 +232,10 @@ struct hvf_vcpu_state;
>>   #define TB_JMP_CACHE_BITS 12
>>   #define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
>>   
>> +typedef struct {
>> +    TranslationBlock *tb;
>> +} CPUJumpCache;
>> +
> 
> I don't quite follow what's going on here. I see we add a vaddr pc in a
> later patch, but I don't quite see why a cache for looking up TBs gets a
> side-channel value added later.
> 
> Is this because the vaddr will no longer match tb->pc? Maybe a
> comment on the structure is needed?

Correct: there will be no tb->pc, so the CPU has to remember the virtual address itself.

This patch only wraps the current pointer into a structure.
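As a rough sketch of where this is heading (the field name and helper below are
illustrative only, not the later patches verbatim), the structure grows a cached
virtual PC and the fast path compares against that instead of dereferencing tb->pc:

    typedef struct {
        TranslationBlock *tb;
        target_ulong pc;   /* virtual PC of @tb, remembered by the CPU itself */
    } CPUJumpCache;

    /* Fast-path lookup, mirroring tb_lookup() in the hunk above; the
     * atomicity of the pc read is glossed over in this sketch. */
    static inline TranslationBlock *
    jc_lookup(CPUState *cpu, target_ulong pc, uint32_t hash)
    {
        CPUJumpCache *jc = &cpu->tb_jmp_cache[hash];
        TranslationBlock *tb = qatomic_rcu_read(&jc->tb);

        return (tb && jc->pc == pc) ? tb : NULL;
    }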


r~

> 
>>   /* work queue */
>>   
>>   /* The union type allows passing of 64 bit target pointers on 32 bit
>> @@ -361,7 +365,7 @@ struct CPUState {
>>       IcountDecr *icount_decr_ptr;
>>   
>>       /* Accessed in parallel; all accesses must be atomic */
>> -    TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE];
>> +    CPUJumpCache tb_jmp_cache[TB_JMP_CACHE_SIZE];
>>   
>>       struct GDBRegisterState *gdb_regs;
>>       int gdb_num_regs;
>> @@ -452,7 +456,7 @@ static inline void cpu_tb_jmp_cache_clear(CPUState *cpu)
>>       unsigned int i;
>>   
>>       for (i = 0; i < TB_JMP_CACHE_SIZE; i++) {
>> -        qatomic_set(&cpu->tb_jmp_cache[i], NULL);
>> +        qatomic_set(&cpu->tb_jmp_cache[i].tb, NULL);
>>       }
>>   }
>>   
>> diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
>> index dd58a144a8..c6283d5798 100644
>> --- a/accel/tcg/cpu-exec.c
>> +++ b/accel/tcg/cpu-exec.c
>> @@ -252,7 +252,7 @@ static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
>>       tcg_debug_assert(!(cflags & CF_INVALID));
>>   
>>       hash = tb_jmp_cache_hash_func(pc);
>> -    tb = qatomic_rcu_read(&cpu->tb_jmp_cache[hash]);
>> +    tb = qatomic_rcu_read(&cpu->tb_jmp_cache[hash].tb);
>>   
>>       if (likely(tb &&
>>                  tb->pc == pc &&
>> @@ -266,7 +266,7 @@ static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
>>       if (tb == NULL) {
>>           return NULL;
>>       }
>> -    qatomic_set(&cpu->tb_jmp_cache[hash], tb);
>> +    qatomic_set(&cpu->tb_jmp_cache[hash].tb, tb);
>>       return tb;
>>   }
>>   
>> @@ -987,6 +987,8 @@ int cpu_exec(CPUState *cpu)
>>   
>>               tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
>>               if (tb == NULL) {
>> +                uint32_t h;
>> +
>>                   mmap_lock();
>>                   tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
>>                   mmap_unlock();
>> @@ -994,7 +996,8 @@ int cpu_exec(CPUState *cpu)
>>                    * We add the TB in the virtual pc hash table
>>                    * for the fast lookup
>>                    */
>> -                qatomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
>> +                h = tb_jmp_cache_hash_func(pc);
>> +                qatomic_set(&cpu->tb_jmp_cache[h].tb, tb);
>>               }
>>   
>>   #ifndef CONFIG_USER_ONLY
>> diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
>> index f5e6ca2da2..fb8f3087f1 100644
>> --- a/accel/tcg/cputlb.c
>> +++ b/accel/tcg/cputlb.c
>> @@ -103,7 +103,7 @@ static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
>>       unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr);
>>   
>>       for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
>> -        qatomic_set(&cpu->tb_jmp_cache[i0 + i], NULL);
>> +        qatomic_set(&cpu->tb_jmp_cache[i0 + i].tb, NULL);
>>       }
>>   }
>>   
>> diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
>> index f429d33981..efa479ccf3 100644
>> --- a/accel/tcg/translate-all.c
>> +++ b/accel/tcg/translate-all.c
>> @@ -1187,8 +1187,8 @@ static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
>>       /* remove the TB from the hash list */
>>       h = tb_jmp_cache_hash_func(tb->pc);
>>       CPU_FOREACH(cpu) {
>> -        if (qatomic_read(&cpu->tb_jmp_cache[h]) == tb) {
>> -            qatomic_set(&cpu->tb_jmp_cache[h], NULL);
>> +        if (qatomic_read(&cpu->tb_jmp_cache[h].tb) == tb) {
>> +            qatomic_set(&cpu->tb_jmp_cache[h].tb, NULL);
>>           }
>>       }
> 
> 



Thread overview: 38+ messages
2022-09-25 10:51 [PATCH v5 00/17] tcg: CPUTLBEntryFull and TARGET_TB_PCREL Richard Henderson
2022-09-25 10:51 ` [PATCH v5 01/17] accel/tcg: Rename CPUIOTLBEntry to CPUTLBEntryFull Richard Henderson
2022-09-29 11:45   ` Alex Bennée
2022-09-25 10:51 ` [PATCH v5 02/17] accel/tcg: Drop addr member from SavedIOTLB Richard Henderson
2022-09-29 11:46   ` Alex Bennée
2022-09-25 10:51 ` [PATCH v5 03/17] accel/tcg: Suppress auto-invalidate in probe_access_internal Richard Henderson
2022-09-29 11:49   ` Alex Bennée
2022-09-29 11:50   ` David Hildenbrand
2022-09-25 10:51 ` [PATCH v5 04/17] accel/tcg: Introduce probe_access_full Richard Henderson
2022-09-29 11:51   ` Alex Bennée
2022-09-25 10:51 ` [PATCH v5 05/17] accel/tcg: Introduce tlb_set_page_full Richard Henderson
2022-09-29 12:00   ` Alex Bennée
2022-09-25 10:51 ` [PATCH v5 06/17] include/exec: Introduce TARGET_PAGE_ENTRY_EXTRA Richard Henderson
2022-09-29 12:00   ` Alex Bennée
2022-09-25 10:51 ` [PATCH v5 07/17] target/sparc: Use tlb_set_page_full Richard Henderson
2022-09-25 10:51 ` [PATCH v5 08/17] accel/tcg: Move byte_swap from MemTxAttrs to CPUTLBEntryFull Richard Henderson
2022-09-29 12:27   ` Alex Bennée
2022-09-25 10:51 ` [PATCH v5 09/17] accel/tcg: Add force_aligned " Richard Henderson
2022-09-25 10:51 ` [PATCH v5 10/17] accel/tcg: Remove PageDesc code_bitmap Richard Henderson
2022-09-29 12:27   ` Alex Bennée
2022-09-25 10:51 ` [PATCH v5 11/17] accel/tcg: Use bool for page_find_alloc Richard Henderson
2022-09-25 10:51 ` [PATCH v5 12/17] accel/tcg: Use DisasContextBase in plugin_gen_tb_start Richard Henderson
2022-09-25 10:51 ` [PATCH v5 13/17] accel/tcg: Do not align tb->page_addr[0] Richard Henderson
2022-09-25 10:51 ` [PATCH v5 14/17] include/hw/core: Create struct CPUJumpCache Richard Henderson
2022-09-29 13:46   ` Alex Bennée
2022-09-29 16:22     ` Richard Henderson [this message]
2022-09-29 17:01       ` Alex Bennée
2022-09-25 10:51 ` [PATCH v5 15/17] accel/tcg: Introduce tb_pc and tb_pc_log Richard Henderson
2022-09-25 10:51 ` [PATCH v5 16/17] accel/tcg: Introduce TARGET_TB_PCREL Richard Henderson
2022-09-30 12:02   ` Peter Maydell
2022-09-30 12:59     ` Alex Bennée
2022-09-30 13:25       ` Peter Maydell
2022-09-30 14:57         ` Alex Bennée
2022-09-30 15:08           ` Peter Maydell
2022-09-30 17:35         ` Richard Henderson
2022-09-25 10:51 ` [PATCH v5 17/17] accel/tcg: Split log_cpu_exec into inline and slow path Richard Henderson
2022-09-29  2:16 ` [PATCH v5 00/17] tcg: CPUTLBEntryFull and TARGET_TB_PCREL Richard Henderson
2022-09-29  6:53   ` Mark Cave-Ayland
