qemu-devel.nongnu.org archive mirror
 help / color / mirror / Atom feed
From: Blue Swirl <blauwirbel@gmail.com>
To: Kirill Batuzov <batuzovk@ispras.ru>
Cc: qemu-devel@nongnu.org, zhur@ispras.ru
Subject: Re: [Qemu-devel] [RFC][PATCH v0 8/8] Add spill count profiling.
Date: Mon, 23 May 2011 22:32:16 +0300	[thread overview]
Message-ID: <BANLkTi=Uob0fA0aMWPOop6nG8foLB_CJbg@mail.gmail.com> (raw)
In-Reply-To: <1306161654-4388-9-git-send-email-batuzovk@ispras.ru>

On Mon, May 23, 2011 at 5:40 PM, Kirill Batuzov <batuzovk@ispras.ru> wrote:
> Gather statistics on generated spills.  This is useful for debugging and for
> evaluating a new register allocator.
>
> Signed-off-by: Kirill Batuzov <batuzovk@ispras.ru>
> ---
>  tcg/tcg.c |   69 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
>  tcg/tcg.h |    6 +++++
>  2 files changed, 75 insertions(+), 0 deletions(-)
>
> diff --git a/tcg/tcg.c b/tcg/tcg.c
> index 022eef9..ba2cddc 100644
> --- a/tcg/tcg.c
> +++ b/tcg/tcg.c
> @@ -1530,6 +1530,11 @@ static void temp_allocate_frame(TCGContext *s, int temp)
>     s->current_frame_offset += sizeof(tcg_target_long);
>  }
>
> +#ifdef CONFIG_PROFILER
> +enum { SPILL_REAL, SPILL_BB_END, SPILL_CALL_HWREG,
> +       SPILL_CALL_IARG, SPILL_CALL_CLOBBER } spill_cause;
> +#endif

How about moving this to TCGContext instead of using static variables?

> +
>  /* free register 'reg' by spilling the corresponding temporary if necessary */
>  static void tcg_reg_free(TCGContext *s, int reg)
>  {
> @@ -1544,6 +1549,26 @@ static void tcg_reg_free(TCGContext *s, int reg)
>             if (!ts->mem_allocated)
>                 temp_allocate_frame(s, temp);
>             tcg_out_st(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
> +#ifdef CONFIG_PROFILER
> +            s->spill_count++;
> +            switch (spill_cause) {
> +            case SPILL_REAL:
> +                s->spill_real++;
> +                break;
> +            case SPILL_BB_END:
> +                s->spill_bb_end++;
> +                break;
> +            case SPILL_CALL_HWREG:
> +                s->spill_call_hwreg++;
> +                break;
> +            case SPILL_CALL_IARG:
> +                s->spill_call_iarg++;
> +                break;
> +            case SPILL_CALL_CLOBBER:
> +                s->spill_call_clobber++;
> +                break;
> +            }
> +#endif
>         }
>         ts->val_type = TEMP_VAL_MEM;
>         s->reg_to_temp[reg] = -1;
> @@ -1582,6 +1607,9 @@ static int tcg_reg_alloc(TCGContext *s, TCGRegSet reg1, TCGRegSet reg2)
>                 }
>             }
>  #else
> +#ifdef CONFIG_PROFILER
> +            spill_cause = SPILL_REAL;
> +#endif
>             tcg_reg_free(s, reg);
>             return reg;
>  #endif
> @@ -1590,6 +1618,9 @@ static int tcg_reg_alloc(TCGContext *s, TCGRegSet reg1, TCGRegSet reg2)
>
>  #ifdef USE_ADVANCED_REGALLOC
>     if (best_score >= 0 && best_reg >= 0) {
> +#ifdef CONFIG_PROFILER
> +        spill_cause = SPILL_REAL;
> +#endif
>         tcg_reg_free(s, best_reg);
>         return best_reg;
>     }
> @@ -1653,6 +1684,9 @@ static void tcg_reg_alloc_bb_end(TCGContext *s, TCGRegSet allocated_regs)
>     for(i = s->nb_globals; i < s->nb_temps; i++) {
>         ts = &s->temps[i];
>         if (ts->temp_local) {
> +#ifdef CONFIG_PROFILER
> +            spill_cause = SPILL_BB_END;
> +#endif
>             temp_save(s, i, allocated_regs);
>         } else {
>             if (ts->val_type == TEMP_VAL_REG) {
> @@ -1662,6 +1696,10 @@ static void tcg_reg_alloc_bb_end(TCGContext *s, TCGRegSet allocated_regs)
>         }
>     }
>
> +#ifdef CONFIG_PROFILER
> +    spill_cause = SPILL_BB_END;
> +#endif
> +
>     save_globals(s, allocated_regs);
>  }
>
> @@ -1860,12 +1898,18 @@ static void tcg_reg_alloc_op(TCGContext *s,
>             /* XXX: permit generic clobber register list ? */
>             for(reg = 0; reg < TCG_TARGET_NB_REGS; reg++) {
>                 if (tcg_regset_test_reg(tcg_target_call_clobber_regs, reg)) {
> +#ifdef CONFIG_PROFILER
> +                    spill_cause = SPILL_CALL_CLOBBER;
> +#endif
>                     tcg_reg_free(s, reg);
>                 }
>             }
>             /* XXX: for load/store we could do that only for the slow path
>                (i.e. when a memory callback is called) */
>
> +#ifdef CONFIG_PROFILER
> +            spill_cause = SPILL_CALL_HWREG;
> +#endif
>             /* store globals and free associated registers (we assume the insn
>                can modify any global. */
>             save_globals(s, allocated_regs);
> @@ -2001,6 +2045,9 @@ static int tcg_reg_alloc_call(TCGContext *s, const TCGOpDef *def,
>         if (arg != TCG_CALL_DUMMY_ARG) {
>             ts = &s->temps[arg];
>             reg = tcg_target_call_iarg_regs[i];
> +#ifdef CONFIG_PROFILER
> +            spill_cause = SPILL_CALL_IARG;
> +#endif
>             tcg_reg_free(s, reg);
>             if (ts->val_type == TEMP_VAL_REG) {
>                 if (ts->reg != reg) {
> @@ -2071,6 +2118,9 @@ static int tcg_reg_alloc_call(TCGContext *s, const TCGOpDef *def,
>     /* clobber call registers */
>     for(reg = 0; reg < TCG_TARGET_NB_REGS; reg++) {
>         if (tcg_regset_test_reg(tcg_target_call_clobber_regs, reg)) {
> +#ifdef CONFIG_PROFILER
> +            spill_cause = SPILL_CALL_CLOBBER;
> +#endif
>             tcg_reg_free(s, reg);
>         }
>     }
> @@ -2078,6 +2128,9 @@ static int tcg_reg_alloc_call(TCGContext *s, const TCGOpDef *def,
>     /* store globals and free associated registers (we assume the call
>        can modify any global. */
>     if (!(flags & TCG_CALL_CONST)) {
> +#ifdef CONFIG_PROFILER
> +        spill_cause = SPILL_CALL_HWREG;
> +#endif
>         save_globals(s, allocated_regs);
>     }
>
> @@ -2209,6 +2262,14 @@ static inline int tcg_gen_code_common(TCGContext *s, uint8_t *gen_code_buf,
>             if (args[0] < s->nb_globals) {
>                 if (tcg_op_defs[gen_opc_buf[param_next_use_ptr[0]]].flags
>                         & (TCG_OPF_CALL_CLOBBER | TCG_OPF_BB_END)) {
> +#ifdef CONFIG_PROFILER
> +                    if (tcg_op_defs[gen_opc_buf[param_next_use_ptr[0]]].flags
> +                            & TCG_OPF_CALL_CLOBBER) {
> +                        spill_cause = SPILL_CALL_HWREG;
> +                    } else {
> +                        spill_cause = SPILL_BB_END;
> +                    }
> +#endif
>                     tcg_reg_free(s, s->temps[args[0]].reg);
>                 }
>             }
> @@ -2354,6 +2415,14 @@ void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf)
>                 s->restore_count);
>     cpu_fprintf(f, "  avg cycles        %0.1f\n",
>                 s->restore_count ? (double)s->restore_time / s->restore_count : 0);
> +    cpu_fprintf(f, "spill count         %" PRId64 "\n",
> +                s->spill_count);
> +    cpu_fprintf(f, "  real spills       %" PRId64 "\n", s->spill_real);
> +    cpu_fprintf(f, "  spills at bb end  %" PRId64 "\n", s->spill_bb_end);
> +    cpu_fprintf(f, "  spills at call:\n");
> +    cpu_fprintf(f, "    globals         %" PRId64 "\n", s->spill_call_hwreg);
> +    cpu_fprintf(f, "    iarg passing    %" PRId64 "\n", s->spill_call_iarg);
> +    cpu_fprintf(f, "    call cloobers   %" PRId64 "\n", s->spill_call_clobber);

cloober?

>
>     dump_op_count();
>  }
> diff --git a/tcg/tcg.h b/tcg/tcg.h
> index 9ff519e..722bd72 100644
> --- a/tcg/tcg.h
> +++ b/tcg/tcg.h
> @@ -328,6 +328,12 @@ struct TCGContext {
>     int64_t la_time;
>     int64_t restore_count;
>     int64_t restore_time;
> +    int64_t spill_count;
> +    int64_t spill_bb_end;
> +    int64_t spill_call_hwreg;
> +    int64_t spill_call_iarg;
> +    int64_t spill_call_clobber;
> +    int64_t spill_real;
>  #endif
>
>  #ifdef CONFIG_DEBUG_TCG
> --
> 1.7.4.1
>
>
>

  reply	other threads:[~2011-05-23 19:32 UTC|newest]

Thread overview: 16+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2011-05-23 14:40 [Qemu-devel] [RFC][PATCH v0 0/8] Improve register allocator Kirill Batuzov
2011-05-23 14:40 ` [Qemu-devel] [RFC][PATCH v0 1/8] Compute additional liveness information for " Kirill Batuzov
2011-05-23 14:40 ` [Qemu-devel] [RFC][PATCH v0 2/8] Propagate REG_NEXT_USE value through process of register allocation Kirill Batuzov
2011-05-23 14:40 ` [Qemu-devel] [RFC][PATCH v0 3/8] Do better spill choice Kirill Batuzov
2011-05-23 14:40 ` [Qemu-devel] [RFC][PATCH v0 4/8] Calculate NEXT_CALL liveness information Kirill Batuzov
2011-05-23 14:40 ` [Qemu-devel] [RFC][PATCH v0 5/8] Track call-clobbered uses of registers Kirill Batuzov
2011-05-23 14:40 ` [Qemu-devel] [RFC][PATCH v0 6/8] Spill globals early if their next use is in call Kirill Batuzov
2011-05-23 14:40 ` [Qemu-devel] [RFC][PATCH v0 7/8] Spill globals early if their next use is at the BB end Kirill Batuzov
2011-05-23 14:40 ` [Qemu-devel] [RFC][PATCH v0 8/8] Add spill count profiling Kirill Batuzov
2011-05-23 19:32   ` Blue Swirl [this message]
2011-05-23 21:22 ` [Qemu-devel] [RFC][PATCH v0 0/8] Improve register allocator Aurelien Jarno
2011-05-24 11:31   ` Kirill Batuzov
2011-05-24 12:40     ` Aurelien Jarno
2011-05-24 13:24     ` Laurent Desnogues
2011-05-24 13:32       ` Kirill Batuzov
2011-05-24 16:07     ` Richard Henderson

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to='BANLkTi=Uob0fA0aMWPOop6nG8foLB_CJbg@mail.gmail.com' \
    --to=blauwirbel@gmail.com \
    --cc=batuzovk@ispras.ru \
    --cc=qemu-devel@nongnu.org \
    --cc=zhur@ispras.ru \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).