qemu-devel.nongnu.org archive mirror
* [Qemu-devel] [PATCH] tcg: fix segfault when MO_UNALN is set
@ 2015-05-26 12:46 Yongbok Kim
  2015-05-26 15:49 ` Richard Henderson
  0 siblings, 1 reply; 4+ messages in thread
From: Yongbok Kim @ 2015-05-26 12:46 UTC (permalink / raw)
  To: qemu-devel; +Cc: peter.maydell, leon.alrae, rth

When MO_UNALN is set, the memop indexes out of bounds in the load/store
helper function pointer arrays in tcg_out_qemu_{ld,st}_slow_path() and
their equivalents, causing a segfault.

This patch introduces a new inline function, get_memalign(), to extract
the alignment information from the combined op/idx parameter, and makes
get_memop() return the op without the alignment bits, avoiding the
out-of-bounds access.
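
As a rough illustration of the intended split (the constant values below
are assumptions based on this tree's TCGMemOp layout, where the mmu_idx
sits in the low 4 bits of the combined value and the alignment flag is
bit 4 of the op):

  #include <assert.h>

  #define MO_AMASK 16u     /* alignment flag: MO_ALIGN / MO_UNALN */

  typedef unsigned TCGMemOpIdx;

  /* Pack op and mmu_idx; unchanged by this patch.  */
  static TCGMemOpIdx make_memop_idx(unsigned op, unsigned idx)
  {
      return (op << 4) | idx;
  }

  /* After this patch: the op without its alignment bits.  */
  static unsigned get_memop(TCGMemOpIdx oi)
  {
      return (oi >> 4) & ~MO_AMASK;
  }

  /* New accessor: only the alignment bits.  */
  static unsigned get_memalign(TCGMemOpIdx oi)
  {
      return (oi >> 4) & MO_AMASK;
  }

  int main(void)
  {
      TCGMemOpIdx oi = make_memop_idx(2u /* MO_32 */ | MO_AMASK, 1);

      assert(get_memop(oi) == 2u);          /* safe as a helper-array index */
      assert(get_memalign(oi) == MO_AMASK); /* alignment recovered separately */
      return 0;
  }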

Signed-off-by: Yongbok Kim <yongbok.kim@imgtec.com>
---
 softmmu_template.h |   24 ++++++++++++------------
 tcg/tcg.h          |   14 +++++++++++++-
 2 files changed, 25 insertions(+), 13 deletions(-)

diff --git a/softmmu_template.h b/softmmu_template.h
index 4778473..1a1de4a 100644
--- a/softmmu_template.h
+++ b/softmmu_template.h
@@ -184,7 +184,7 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
     if ((addr & TARGET_PAGE_MASK)
          != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
         if ((addr & (DATA_SIZE - 1)) != 0
-            && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
+            && get_memalign(oi) == MO_ALIGN) {
             cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                                  mmu_idx, retaddr);
         }
@@ -218,7 +218,7 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
         DATA_TYPE res1, res2;
         unsigned shift;
     do_unaligned_access:
-        if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
+        if (get_memalign(oi) == MO_ALIGN) {
             cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                                  mmu_idx, retaddr);
         }
@@ -237,7 +237,7 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
 
     /* Handle aligned access or unaligned access in the same page.  */
     if ((addr & (DATA_SIZE - 1)) != 0
-        && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
+        && get_memalign(oi) == MO_ALIGN) {
         cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                              mmu_idx, retaddr);
     }
@@ -271,7 +271,7 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
     if ((addr & TARGET_PAGE_MASK)
          != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
         if ((addr & (DATA_SIZE - 1)) != 0
-            && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
+            && get_memalign(oi) == MO_ALIGN) {
             cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                                  mmu_idx, retaddr);
         }
@@ -305,7 +305,7 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
         DATA_TYPE res1, res2;
         unsigned shift;
     do_unaligned_access:
-        if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
+        if (get_memalign(oi) == MO_ALIGN) {
             cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                                  mmu_idx, retaddr);
         }
@@ -324,7 +324,7 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
 
     /* Handle aligned access or unaligned access in the same page.  */
     if ((addr & (DATA_SIZE - 1)) != 0
-        && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
+        && get_memalign(oi) == MO_ALIGN) {
         cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                              mmu_idx, retaddr);
     }
@@ -399,7 +399,7 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     if ((addr & TARGET_PAGE_MASK)
         != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
         if ((addr & (DATA_SIZE - 1)) != 0
-            && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
+            && get_memalign(oi) == MO_ALIGN) {
             cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                                  mmu_idx, retaddr);
         }
@@ -430,7 +430,7 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                      >= TARGET_PAGE_SIZE)) {
         int i;
     do_unaligned_access:
-        if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
+        if (get_memalign(oi) == MO_ALIGN) {
             cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                                  mmu_idx, retaddr);
         }
@@ -450,7 +450,7 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
 
     /* Handle aligned access or unaligned access in the same page.  */
     if ((addr & (DATA_SIZE - 1)) != 0
-        && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
+        && get_memalign(oi) == MO_ALIGN) {
         cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                              mmu_idx, retaddr);
     }
@@ -479,7 +479,7 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     if ((addr & TARGET_PAGE_MASK)
         != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
         if ((addr & (DATA_SIZE - 1)) != 0
-            && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
+            && get_memalign(oi) == MO_ALIGN) {
             cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                                  mmu_idx, retaddr);
         }
@@ -510,7 +510,7 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                      >= TARGET_PAGE_SIZE)) {
         int i;
     do_unaligned_access:
-        if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
+        if (get_memalign(oi) == MO_ALIGN) {
             cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                                  mmu_idx, retaddr);
         }
@@ -530,7 +530,7 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
 
     /* Handle aligned access or unaligned access in the same page.  */
     if ((addr & (DATA_SIZE - 1)) != 0
-        && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
+        && get_memalign(oi) == MO_ALIGN) {
         cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                              mmu_idx, retaddr);
     }
diff --git a/tcg/tcg.h b/tcg/tcg.h
index 8098f82..849c51a 100644
--- a/tcg/tcg.h
+++ b/tcg/tcg.h
@@ -863,7 +863,19 @@ static inline TCGMemOpIdx make_memop_idx(TCGMemOp op, unsigned idx)
  */
 static inline TCGMemOp get_memop(TCGMemOpIdx oi)
 {
-    return oi >> 4;
+    return (oi >> 4) & ~MO_AMASK;
+}
+
+/**
+ * get_memalign
+ * @oi: combined op/idx parameter
+ *
+ * Extract the memory alignment information from the combined value.
+ */
+
+static inline TCGMemOp get_memalign(TCGMemOpIdx oi)
+{
+    return (oi >> 4) & MO_AMASK;
 }
 
 /**
-- 
1.7.5.4

* Re: [Qemu-devel] [PATCH] tcg: fix segfault when MO_UNALN is set
  2015-05-26 12:46 [Qemu-devel] [PATCH] tcg: fix segfault when MO_UNALN is set Yongbok Kim
@ 2015-05-26 15:49 ` Richard Henderson
  2015-05-26 15:57   ` Yongbok Kim
  0 siblings, 1 reply; 4+ messages in thread
From: Richard Henderson @ 2015-05-26 15:49 UTC (permalink / raw)
  To: Yongbok Kim, qemu-devel; +Cc: peter.maydell, leon.alrae

On 05/26/2015 05:46 AM, Yongbok Kim wrote:
> When MO_UNALN is set, the memop indexes out of bounds in the load/store
> helper function pointer arrays in tcg_out_qemu_{ld,st}_slow_path() and
> their equivalents, causing a segfault.

I'd like to know more about this crash please.  Where does it happen?


r~

* Re: [Qemu-devel] [PATCH] tcg: fix segfault when MO_UNALN is set
  2015-05-26 15:49 ` Richard Henderson
@ 2015-05-26 15:57   ` Yongbok Kim
  2015-05-26 16:10     ` Richard Henderson
  0 siblings, 1 reply; 4+ messages in thread
From: Yongbok Kim @ 2015-05-26 15:57 UTC (permalink / raw)
  To: Richard Henderson, qemu-devel; +Cc: peter.maydell, leon.alrae

On 26/05/2015 16:49, Richard Henderson wrote:
> On 05/26/2015 05:46 AM, Yongbok Kim wrote:
>> When MO_UNALN is set, the memop indexes out of bounds in the load/store
>> helper function pointer arrays in tcg_out_qemu_{ld,st}_slow_path() and
>> their equivalents, causing a segfault.
> 
> I'd like to know more about this crash please.  Where does it happen?
> 
> 
> r~
> 

tcg/i386/tcg-target.c

> static void * const qemu_st_helpers[16] = {
>     [MO_UB]   = helper_ret_stb_mmu,
>     [MO_LEUW] = helper_le_stw_mmu,
>     [MO_LEUL] = helper_le_stl_mmu,
>     [MO_LEQ]  = helper_le_stq_mmu,
>     [MO_BEUW] = helper_be_stw_mmu,
>     [MO_BEUL] = helper_be_stl_mmu,
>     [MO_BEQ]  = helper_be_stq_mmu,
> };

...

> static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
> {

>     TCGMemOp opc = get_memop(oi);

>     /* "Tail call" to the helper, with the return address back inline.  */
>     tcg_out_push(s, retaddr);
>     tcg_out_jmp(s, qemu_st_helpers[opc]);

Here is the crashing point...
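
To make the failure mode concrete, here is a minimal standalone sketch of
the index arithmetic (the values are assumptions based on this tree:
mmu_idx in the low 4 bits, MO_AMASK == 16, and MO_UNALN == MO_AMASK on
ALIGNED_ONLY targets such as MIPS):

  #include <stdio.h>

  #define MO_32    2u     /* 32-bit access size */
  #define MO_AMASK 16u    /* alignment flag; MO_UNALN on ALIGNED_ONLY targets */

  int main(void)
  {
      unsigned op  = MO_32 | MO_AMASK;   /* unaligned 32-bit access on MIPS */
      unsigned oi  = (op << 4) | 1;      /* make_memop_idx(op, mmu_idx = 1) */
      unsigned opc = oi >> 4;            /* old get_memop(): MO_AMASK kept */

      /* qemu_st_helpers[] has 16 entries (indices 0..15), so opc == 18
         reads past the end of the array and the generated tail call
         targets a junk address.  */
      printf("opc = %u, valid helper indices are 0..15\n", opc);
      return 0;
  }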

Regards,
Yongbok

* Re: [Qemu-devel] [PATCH] tcg: fix segfault when MO_UNALN is set
  2015-05-26 15:57   ` Yongbok Kim
@ 2015-05-26 16:10     ` Richard Henderson
  0 siblings, 0 replies; 4+ messages in thread
From: Richard Henderson @ 2015-05-26 16:10 UTC (permalink / raw)
  To: Yongbok Kim, qemu-devel; +Cc: peter.maydell, leon.alrae

On 05/26/2015 08:57 AM, Yongbok Kim wrote:
> On 26/05/2015 16:49, Richard Henderson wrote:
>> On 05/26/2015 05:46 AM, Yongbok Kim wrote:
>>> When MO_UNALN is set, the memop indexes out of bounds in the load/store
>>> helper function pointer arrays in tcg_out_qemu_{ld,st}_slow_path() and
>>> their equivalents, causing a segfault.
>>
>> I'd like to know more about this crash please.  Where does it happen?
>>
>>
>> r~
>>
> 
> tcg/i386/tcg-target.c
> 
>> static void * const qemu_st_helpers[16] = {
>>     [MO_UB]   = helper_ret_stb_mmu,
>>     [MO_LEUW] = helper_le_stw_mmu,
>>     [MO_LEUL] = helper_le_stl_mmu,
>>     [MO_LEQ]  = helper_le_stq_mmu,
>>     [MO_BEUW] = helper_be_stw_mmu,
>>     [MO_BEUL] = helper_be_stl_mmu,
>>     [MO_BEQ]  = helper_be_stq_mmu,
>> };
> 
> ...
> 
>> static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
>> {
> 
>>     TCGMemOp opc = get_memop(oi);
> 
>>     /* "Tail call" to the helper, with the return address back inline.  */
>>     tcg_out_push(s, retaddr);
>>     tcg_out_jmp(s, qemu_st_helpers[opc]);
> 
> Here is the crashing point...

Ah, I think I'd masked things in there.  But clearly not.

Your patch has the nice property of not having to modify all the backends, but
it has the unfortunate property that make* and get* become asymmetrical.
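
For concreteness, a sketch of that asymmetry using the accessors as this
patch defines them in tcg.h (assuming a non-ALIGNED_ONLY build, where
MO_ALIGN is the non-zero alignment value):

  TCGMemOpIdx oi = make_memop_idx(MO_32 | MO_ALIGN, 0);

  /* get_memop(oi)    == MO_32     -- alignment bit stripped            */
  /* get_memalign(oi) == MO_ALIGN  -- recovered only via the new helper */

  /* So get_memop(make_memop_idx(op, idx)) != op whenever op carries
     alignment bits; the pack/unpack pair is no longer symmetric.       */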

I'll try to come up with an alternative soon, and we'll see how messy it gets.


r~
