From: "Alex Bennée" <alex.bennee@linaro.org>
To: Sergey Fedorov <sergey.fedorov@linaro.org>
Cc: qemu-devel@nongnu.org, Sergey Fedorov <serge.fdrv@gmail.com>,
	Paolo Bonzini <pbonzini@redhat.com>,
	Peter Crosthwaite <crosthwaite.peter@gmail.com>,
	Richard Henderson <rth@twiddle.net>
Subject: Re: [Qemu-devel] [PATCH v3 03/10] tcg: Rearrange tb_link_page() to avoid forward declaration
Date: Mon, 18 Apr 2016 18:20:01 +0100
Message-ID: <87fuuix29q.fsf@linaro.org>
In-Reply-To: <1460324732-30330-4-git-send-email-sergey.fedorov@linaro.org>


Sergey Fedorov <sergey.fedorov@linaro.org> writes:

> From: Sergey Fedorov <serge.fdrv@gmail.com>
>
> Signed-off-by: Sergey Fedorov <serge.fdrv@gmail.com>
> Signed-off-by: Sergey Fedorov <sergey.fedorov@linaro.org>
> Reviewed-by: Alex Bennée <alex.bennee@linaro.org>

This clashes with the tcg clean-up patches. Should this series always be
applied first?

> ---
>  translate-all.c | 204 ++++++++++++++++++++++++++++----------------------------
>  1 file changed, 101 insertions(+), 103 deletions(-)
>
> diff --git a/translate-all.c b/translate-all.c
> index ba71ff73f55f..7ac7916f2792 100644
> --- a/translate-all.c
> +++ b/translate-all.c
> @@ -153,8 +153,6 @@ void tb_lock_reset(void)
>  #endif
>  }
>
> -static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
> -                         tb_page_addr_t phys_page2);
>  static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);
>
>  void cpu_gen_init(void)
> @@ -1052,6 +1050,107 @@ static void build_page_bitmap(PageDesc *p)
>      }
>  }
>
> +/* add the tb in the target page and protect it if necessary
> + *
> + * Called with mmap_lock held for user-mode emulation.
> + */
> +static inline void tb_alloc_page(TranslationBlock *tb,
> +                                 unsigned int n, tb_page_addr_t page_addr)
> +{
> +    PageDesc *p;
> +#ifndef CONFIG_USER_ONLY
> +    bool page_already_protected;
> +#endif
> +
> +    tb->page_addr[n] = page_addr;
> +    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
> +    tb->page_next[n] = p->first_tb;
> +#ifndef CONFIG_USER_ONLY
> +    page_already_protected = p->first_tb != NULL;
> +#endif
> +    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
> +    invalidate_page_bitmap(p);
> +
> +#if defined(CONFIG_USER_ONLY)
> +    if (p->flags & PAGE_WRITE) {
> +        target_ulong addr;
> +        PageDesc *p2;
> +        int prot;
> +
> +        /* force the host page as non writable (writes will have a
> +           page fault + mprotect overhead) */
> +        page_addr &= qemu_host_page_mask;
> +        prot = 0;
> +        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
> +            addr += TARGET_PAGE_SIZE) {
> +
> +            p2 = page_find(addr >> TARGET_PAGE_BITS);
> +            if (!p2) {
> +                continue;
> +            }
> +            prot |= p2->flags;
> +            p2->flags &= ~PAGE_WRITE;
> +          }
> +        mprotect(g2h(page_addr), qemu_host_page_size,
> +                 (prot & PAGE_BITS) & ~PAGE_WRITE);
> +#ifdef DEBUG_TB_INVALIDATE
> +        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
> +               page_addr);
> +#endif
> +    }
> +#else
> +    /* if some code is already present, then the pages are already
> +       protected. So we handle the case where only the first TB is
> +       allocated in a physical page */
> +    if (!page_already_protected) {
> +        tlb_protect_code(page_addr);
> +    }
> +#endif
> +}
> +
> +/* add a new TB and link it to the physical page tables. phys_page2 is
> + * (-1) to indicate that only one page contains the TB.
> + *
> + * Called with mmap_lock held for user-mode emulation.
> + */
> +static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
> +                         tb_page_addr_t phys_page2)
> +{
> +    unsigned int h;
> +    TranslationBlock **ptb;
> +
> +    /* add in the physical hash table */
> +    h = tb_phys_hash_func(phys_pc);
> +    ptb = &tcg_ctx.tb_ctx.tb_phys_hash[h];
> +    tb->phys_hash_next = *ptb;
> +    *ptb = tb;
> +
> +    /* add in the page list */
> +    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
> +    if (phys_page2 != -1) {
> +        tb_alloc_page(tb, 1, phys_page2);
> +    } else {
> +        tb->page_addr[1] = -1;
> +    }
> +
> +    assert(((uintptr_t)tb & 3) == 0);
> +    tb->jmp_list_first = (uintptr_t)tb | 2;
> +    tb->jmp_list_next[0] = (uintptr_t)NULL;
> +    tb->jmp_list_next[1] = (uintptr_t)NULL;
> +
> +    /* init original jump addresses */
> +    if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
> +        tb_reset_jump(tb, 0);
> +    }
> +    if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
> +        tb_reset_jump(tb, 1);
> +    }
> +
> +#ifdef DEBUG_TB_CHECK
> +    tb_page_check();
> +#endif
> +}
> +
>  /* Called with mmap_lock held for user mode emulation.  */
>  TranslationBlock *tb_gen_code(CPUState *cpu,
>                                target_ulong pc, target_ulong cs_base,
> @@ -1409,107 +1508,6 @@ static void tb_invalidate_phys_page(tb_page_addr_t addr,
>  }
>  #endif
>
> -/* add the tb in the target page and protect it if necessary
> - *
> - * Called with mmap_lock held for user-mode emulation.
> - */
> -static inline void tb_alloc_page(TranslationBlock *tb,
> -                                 unsigned int n, tb_page_addr_t page_addr)
> -{
> -    PageDesc *p;
> -#ifndef CONFIG_USER_ONLY
> -    bool page_already_protected;
> -#endif
> -
> -    tb->page_addr[n] = page_addr;
> -    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
> -    tb->page_next[n] = p->first_tb;
> -#ifndef CONFIG_USER_ONLY
> -    page_already_protected = p->first_tb != NULL;
> -#endif
> -    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
> -    invalidate_page_bitmap(p);
> -
> -#if defined(CONFIG_USER_ONLY)
> -    if (p->flags & PAGE_WRITE) {
> -        target_ulong addr;
> -        PageDesc *p2;
> -        int prot;
> -
> -        /* force the host page as non writable (writes will have a
> -           page fault + mprotect overhead) */
> -        page_addr &= qemu_host_page_mask;
> -        prot = 0;
> -        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
> -            addr += TARGET_PAGE_SIZE) {
> -
> -            p2 = page_find(addr >> TARGET_PAGE_BITS);
> -            if (!p2) {
> -                continue;
> -            }
> -            prot |= p2->flags;
> -            p2->flags &= ~PAGE_WRITE;
> -          }
> -        mprotect(g2h(page_addr), qemu_host_page_size,
> -                 (prot & PAGE_BITS) & ~PAGE_WRITE);
> -#ifdef DEBUG_TB_INVALIDATE
> -        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
> -               page_addr);
> -#endif
> -    }
> -#else
> -    /* if some code is already present, then the pages are already
> -       protected. So we handle the case where only the first TB is
> -       allocated in a physical page */
> -    if (!page_already_protected) {
> -        tlb_protect_code(page_addr);
> -    }
> -#endif
> -}
> -
> -/* add a new TB and link it to the physical page tables. phys_page2 is
> - * (-1) to indicate that only one page contains the TB.
> - *
> - * Called with mmap_lock held for user-mode emulation.
> - */
> -static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
> -                         tb_page_addr_t phys_page2)
> -{
> -    unsigned int h;
> -    TranslationBlock **ptb;
> -
> -    /* add in the physical hash table */
> -    h = tb_phys_hash_func(phys_pc);
> -    ptb = &tcg_ctx.tb_ctx.tb_phys_hash[h];
> -    tb->phys_hash_next = *ptb;
> -    *ptb = tb;
> -
> -    /* add in the page list */
> -    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
> -    if (phys_page2 != -1) {
> -        tb_alloc_page(tb, 1, phys_page2);
> -    } else {
> -        tb->page_addr[1] = -1;
> -    }
> -
> -    assert(((uintptr_t)tb & 3) == 0);
> -    tb->jmp_list_first = (uintptr_t)tb | 2;
> -    tb->jmp_list_next[0] = (uintptr_t)NULL;
> -    tb->jmp_list_next[1] = (uintptr_t)NULL;
> -
> -    /* init original jump addresses */
> -    if (tb->jmp_reset_offset[0] != TB_JMP_RESET_OFFSET_INVALID) {
> -        tb_reset_jump(tb, 0);
> -    }
> -    if (tb->jmp_reset_offset[1] != TB_JMP_RESET_OFFSET_INVALID) {
> -        tb_reset_jump(tb, 1);
> -    }
> -
> -#ifdef DEBUG_TB_CHECK
> -    tb_page_check();
> -#endif
> -}
> -
>  /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
>     tb[1].tc_ptr. Return NULL if not found */
>  static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)


--
Alex Bennée


Thread overview: 26+ messages
2016-04-10 21:45 [Qemu-devel] [PATCH v3 00/10] tcg: Direct block chaining clean-up Sergey Fedorov
2016-04-10 21:45 ` [Qemu-devel] [PATCH v3 01/10] tcg: Clean up direct block chaining data fields Sergey Fedorov
2016-04-19 10:02   ` Alex Bennée
2016-04-10 21:45 ` [Qemu-devel] [PATCH v3 02/10] tcg: Use uintptr_t type for jmp_list_{next|first} fields of TB Sergey Fedorov
2016-04-19 10:34   ` Alex Bennée
2016-04-10 21:45 ` [Qemu-devel] [PATCH v3 03/10] tcg: Rearrange tb_link_page() to avoid forward declaration Sergey Fedorov
2016-04-18 17:20   ` Alex Bennée [this message]
2016-04-18 17:59     ` Sergey Fedorov
2016-04-10 21:45 ` [Qemu-devel] [PATCH v3 04/10] tcg: Init TB's direct jumps before making it visible Sergey Fedorov
2016-04-19 10:55   ` Alex Bennée
2016-04-19 12:42     ` Sergey Fedorov
2016-04-19 13:07       ` Alex Bennée
2016-04-10 21:45 ` [Qemu-devel] [PATCH v3 05/10] tcg: Clarify thread safety check in tb_add_jump() Sergey Fedorov
2016-04-19 11:01   ` Alex Bennée
2016-04-19 12:49     ` Sergey Fedorov
2016-04-19 15:27       ` Alex Bennée
2016-04-10 21:45 ` [Qemu-devel] [PATCH v3 06/10] tcg: Rename tb_jmp_remove() to tb_remove_from_jmp_list() Sergey Fedorov
2016-04-10 21:45 ` [Qemu-devel] [PATCH v3 07/10] tcg: Extract removing of jumps to TB from tb_phys_invalidate() Sergey Fedorov
2016-04-10 21:45 ` [Qemu-devel] [PATCH v3 08/10] tcg: Clean up tb_jmp_unlink() Sergey Fedorov
2016-04-10 21:45 ` [Qemu-devel] [PATCH v3 09/10] tcg: Clean up direct block chaining safety checks Sergey Fedorov
2016-04-19 11:37   ` Alex Bennée
2016-04-19 13:02     ` Sergey Fedorov
2016-04-19 14:53       ` Alex Bennée
2016-04-10 21:45 ` [Qemu-devel] [PATCH v3 10/10] tcg: Moderate direct block chaining safety checks in user mode Sergey Fedorov
2016-04-19 13:10   ` Alex Bennée
2016-04-19 13:17     ` Sergey Fedorov
