* [Qemu-devel] PPC little-endian mode support
From: J. Mayer @ 2005-04-18  8:36 UTC (permalink / raw)
  To: qemu-devel

[-- Attachment #1: Type: text/plain, Size: 260 bytes --]

This patch adds little-endian mode support to the PPC emulation.
It is needed by OS/2, Windows NT, and some programs such as VirtualPC.
The patch has been tested using the OS/2 bootloader (thanks to Tero
Kaarlela).
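
In short, when MSR[LE] is set the translator now selects byte-reversed
memory accessors and byte-swaps fetched opcodes before decoding. As a
rough standalone illustration of the reversal the new _le accessors
perform (simplified helper names, not the actual QEMU functions):

#include <stdint.h>

/* Illustration only: the spirit of the ld16r/ld32r style accessors
 * used when MSR[LE] is set. The emulated bus stays big-endian, so a
 * little-endian access is "do the big-endian access, then reverse
 * the bytes". */
static inline uint16_t swap16(uint16_t v)
{
    return (uint16_t)(((v & 0xFF00) >> 8) | ((v & 0x00FF) << 8));
}

static inline uint32_t swap32(uint32_t v)
{
    return ((v & 0xFF000000) >> 24) | ((v & 0x00FF0000) >> 8) |
           ((v & 0x0000FF00) << 8)  | ((v & 0x000000FF) << 24);
}

/* e.g. a little-endian lwz becomes: value = swap32(load_be32(ea));
 * (load_be32 is a placeholder for the target's big-endian load), and
 * the instruction stream itself is swapped the same way before the
 * opcode fields are extracted. */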

-- 
J. Mayer <l_indien@magic.fr>
Never organized

[-- Attachment #2: ppc_le.diff --]
[-- Type: text/x-patch, Size: 18357 bytes --]

Index: cpu-exec.c
===================================================================
RCS file: /cvsroot/qemu/qemu/cpu-exec.c,v
retrieving revision 1.53
diff -u -d -w -B -b -d -p -r1.53 cpu-exec.c
--- cpu-exec.c	7 Apr 2005 22:20:28 -0000	1.53
+++ cpu-exec.c	18 Apr 2005 08:23:19 -0000
@@ -364,7 +364,8 @@ int cpu_exec(CPUState *env1)
                 cs_base = env->npc;
                 pc = env->pc;
 #elif defined(TARGET_PPC)
-                flags = (msr_pr << MSR_PR) | (msr_fp << MSR_FP) | (msr_se << MSR_SE);
+                flags = (msr_pr << MSR_PR) | (msr_fp << MSR_FP) |
+                    (msr_se << MSR_SE) | (msr_le << MSR_LE);
                 cs_base = 0;
                 pc = env->nip;
 #else
Index: disas.c
===================================================================
RCS file: /cvsroot/qemu/qemu/disas.c,v
retrieving revision 1.20
diff -u -d -w -B -b -d -p -r1.20 disas.c
--- disas.c	7 Apr 2005 22:20:28 -0000	1.20
+++ disas.c	18 Apr 2005 08:23:19 -0000
@@ -141,6 +141,8 @@ void target_disas(FILE *out, target_ulon
 #elif defined(TARGET_SPARC)
     print_insn = print_insn_sparc;
 #elif defined(TARGET_PPC)
+    if (cpu_single_env->msr[MSR_LE])
+        disasm_info.endian = BFD_ENDIAN_LITTLE;
     print_insn = print_insn_ppc;
 #else
     fprintf(out, "0x" TARGET_FMT_lx
Index: target-ppc/op_helper_mem.h
===================================================================
RCS file: /cvsroot/qemu/qemu/target-ppc/op_helper_mem.h,v
retrieving revision 1.3
diff -u -d -w -B -b -d -p -r1.3 op_helper_mem.h
--- target-ppc/op_helper_mem.h	3 Jan 2005 23:42:39 -0000	1.3
+++ target-ppc/op_helper_mem.h	18 Apr 2005 08:23:19 -0000
@@ -40,4 +40,53 @@ void glue(do_stsw, MEMSUFFIX) (int src)
     }
 }
 
+void glue(do_lsw_le, MEMSUFFIX) (int dst)
+{
+    uint32_t tmp;
+    int sh;
+
+    if (loglevel > 0) {
+        fprintf(logfile, "%s: addr=0x%08x count=%d reg=%d\n",
+                __func__, T0, T1, dst);
+    }
+    for (; T1 > 3; T1 -= 4, T0 += 4) {
+        tmp = glue(ldl, MEMSUFFIX)(T0);
+        ugpr(dst++) = ((tmp & 0xFF000000) >> 24) | ((tmp & 0x00FF0000) >> 8) |
+            ((tmp & 0x0000FF00) << 8) | ((tmp & 0x000000FF) << 24);
+        if (dst == 32)
+            dst = 0;
+    }
+    if (T1 > 0) {
+        tmp = 0;
+        for (sh = 0; T1 > 0; T1--, T0++, sh += 8) {
+            tmp |= glue(ldub, MEMSUFFIX)(T0) << sh;
+        }
+        ugpr(dst) = tmp;
+    }
+}
+
+void glue(do_stsw_le, MEMSUFFIX) (int src)
+{
+    uint32_t tmp;
+    int sh;
+
+    if (loglevel > 0) {
+        fprintf(logfile, "%s: addr=0x%08x count=%d reg=%d\n",
+                __func__, T0, T1, src);
+    }
+    for (; T1 > 3; T1 -= 4, T0 += 4) {
+        tmp = ((ugpr(src) & 0xFF000000) >> 24);
+        tmp |= ((ugpr(src) & 0x00FF0000) >> 8);
+        tmp |= ((ugpr(src) & 0x0000FF00) << 8);
+        tmp |= ((ugpr(src) & 0x000000FF) << 24);
+        glue(stl, MEMSUFFIX)(T0, tmp);
+        if (++src == 32)
+            src = 0;
+    }
+    if (T1 > 0) {
+        for (sh = 0; T1 > 0; T1--, T0++, sh += 8)
+            glue(stb, MEMSUFFIX)(T0, (ugpr(src) >> sh) & 0xFF);
+    }
+}
+
 #undef MEMSUFFIX
Index: target-ppc/op_mem.h
===================================================================
RCS file: /cvsroot/qemu/qemu/target-ppc/op_mem.h,v
retrieving revision 1.6
diff -u -d -w -B -b -d -p -r1.6 op_mem.h
--- target-ppc/op_mem.h	3 Jan 2005 23:42:39 -0000	1.6
+++ target-ppc/op_mem.h	18 Apr 2005 08:23:19 -0000
@@ -8,6 +8,12 @@ static inline uint16_t glue(ld16r, MEMSU
     return ((tmp & 0xFF00) >> 8) | ((tmp & 0x00FF) << 8);
 }
 
+static inline int32_t glue(ld16rs, MEMSUFFIX) (target_ulong EA)
+{
+    int16_t tmp = glue(lduw, MEMSUFFIX)(EA);
+    return (int16_t)(((tmp & 0xFF00) >> 8) | ((tmp & 0x00FF) << 8));
+}
+
 static inline uint32_t glue(ld32r, MEMSUFFIX) (target_ulong EA)
 {
     uint32_t tmp = glue(ldl, MEMSUFFIX)(EA);
@@ -48,17 +54,29 @@ PPC_LD_OP(ha, ldsw);
 PPC_LD_OP(hz, lduw);
 PPC_LD_OP(wz, ldl);
 
+PPC_LD_OP(ha_le, ld16rs);
+PPC_LD_OP(hz_le, ld16r);
+PPC_LD_OP(wz_le, ld32r);
+
 /***                              Integer store                            ***/
 PPC_ST_OP(b, stb);
 PPC_ST_OP(h, stw);
 PPC_ST_OP(w, stl);
 
+PPC_ST_OP(h_le, st16r);
+PPC_ST_OP(w_le, st32r);
+
 /***                Integer load and store with byte reverse               ***/
 PPC_LD_OP(hbr, ld16r);
 PPC_LD_OP(wbr, ld32r);
 PPC_ST_OP(hbr, st16r);
 PPC_ST_OP(wbr, st32r);
 
+PPC_LD_OP(hbr_le, lduw);
+PPC_LD_OP(wbr_le, ldl);
+PPC_ST_OP(hbr_le, stw);
+PPC_ST_OP(wbr_le, stl);
+
 /***                    Integer load and store multiple                    ***/
 PPC_OP(glue(lmw, MEMSUFFIX))
 {
@@ -80,6 +98,26 @@ PPC_OP(glue(stmw, MEMSUFFIX))
     RETURN();
 }
 
+PPC_OP(glue(lmw_le, MEMSUFFIX))
+{
+    int dst = PARAM(1);
+
+    for (; dst < 32; dst++, T0 += 4) {
+        ugpr(dst) = glue(ld32r, MEMSUFFIX)(T0);
+    }
+    RETURN();
+}
+
+PPC_OP(glue(stmw_le, MEMSUFFIX))
+{
+    int src = PARAM(1);
+
+    for (; src < 32; src++, T0 += 4) {
+        glue(st32r, MEMSUFFIX)(T0, ugpr(src));
+    }
+    RETURN();
+}
+
 /***                    Integer load and store strings                     ***/
 PPC_OP(glue(lswi, MEMSUFFIX))
 {
@@ -87,6 +125,13 @@ PPC_OP(glue(lswi, MEMSUFFIX))
     RETURN();
 }
 
+void glue(do_lsw_le, MEMSUFFIX) (int dst);
+PPC_OP(glue(lswi_le, MEMSUFFIX))
+{
+    glue(do_lsw_le, MEMSUFFIX)(PARAM(1));
+    RETURN();
+}
+
 /* PPC32 specification says we must generate an exception if
  * rA is in the range of registers to be loaded.
  * In an other hand, IBM says this is valid, but rA won't be loaded.
@@ -105,12 +150,32 @@ PPC_OP(glue(lswx, MEMSUFFIX))
     RETURN();
 }
 
+PPC_OP(glue(lswx_le, MEMSUFFIX))
+{
+    if (T1 > 0) {
+        if ((PARAM(1) < PARAM(2) && (PARAM(1) + T1) > PARAM(2)) ||
+            (PARAM(1) < PARAM(3) && (PARAM(1) + T1) > PARAM(3))) {
+            do_raise_exception_err(EXCP_PROGRAM, EXCP_INVAL | EXCP_INVAL_LSWX);
+        } else {
+            glue(do_lsw_le, MEMSUFFIX)(PARAM(1));
+        }
+    }
+    RETURN();
+}
+
 PPC_OP(glue(stsw, MEMSUFFIX))
 {
     glue(do_stsw, MEMSUFFIX)(PARAM(1));
     RETURN();
 }
 
+void glue(do_stsw_le, MEMSUFFIX) (int src);
+PPC_OP(glue(stsw_le, MEMSUFFIX))
+{
+    glue(do_stsw_le, MEMSUFFIX)(PARAM(1));
+    RETURN();
+}
+
 /***                         Floating-point store                          ***/
 #define PPC_STF_OP(name, op)                                                  \
 PPC_OP(glue(glue(st, name), MEMSUFFIX))                                       \
@@ -122,6 +187,43 @@ PPC_OP(glue(glue(st, name), MEMSUFFIX)) 
 PPC_STF_OP(fd, stfq);
 PPC_STF_OP(fs, stfl);
 
+static inline void glue(stfqr, MEMSUFFIX) (target_ulong EA, double d)
+{
+    union {
+        double d;
+        uint64_t u;
+    } u;
+
+    u.d = d;
+    u.u = ((u.u & 0xFF00000000000000ULL) >> 56) |
+        ((u.u & 0x00FF000000000000ULL) >> 40) |
+        ((u.u & 0x0000FF0000000000ULL) >> 24) |
+        ((u.u & 0x000000FF00000000ULL) >> 8) |
+        ((u.u & 0x00000000FF000000ULL) << 8) |
+        ((u.u & 0x0000000000FF0000ULL) << 24) |
+        ((u.u & 0x000000000000FF00ULL) << 40) |
+        ((u.u & 0x00000000000000FFULL) << 56);
+    glue(stfq, MEMSUFFIX)(EA, u.d);
+}
+
+static inline void glue(stflr, MEMSUFFIX) (target_ulong EA, float f)
+{
+    union {
+        float f;
+        uint32_t u;
+    } u;
+
+    u.f = f;
+    u.u = ((u.u & 0xFF000000UL) >> 24) |
+        ((u.u & 0x00FF0000ULL) >> 8) |
+        ((u.u & 0x0000FF00UL) << 8) |
+        ((u.u & 0x000000FFULL) << 24);
+    glue(stfl, MEMSUFFIX)(EA, u.f);
+}
+
+PPC_STF_OP(fd_le, stfqr);
+PPC_STF_OP(fs_le, stflr);
+
 /***                         Floating-point load                           ***/
 #define PPC_LDF_OP(name, op)                                                  \
 PPC_OP(glue(glue(l, name), MEMSUFFIX))                                        \
@@ -133,6 +235,45 @@ PPC_OP(glue(glue(l, name), MEMSUFFIX))  
 PPC_LDF_OP(fd, ldfq);
 PPC_LDF_OP(fs, ldfl);
 
+static inline double glue(ldfqr, MEMSUFFIX) (target_ulong EA)
+{
+    union {
+        double d;
+        uint64_t u;
+    } u;
+
+    u.d = glue(ldfq, MEMSUFFIX)(EA);
+    u.u = ((u.u & 0xFF00000000000000ULL) >> 56) |
+        ((u.u & 0x00FF000000000000ULL) >> 40) |
+        ((u.u & 0x0000FF0000000000ULL) >> 24) |
+        ((u.u & 0x000000FF00000000ULL) >> 8) |
+        ((u.u & 0x00000000FF000000ULL) << 8) |
+        ((u.u & 0x0000000000FF0000ULL) << 24) |
+        ((u.u & 0x000000000000FF00ULL) << 40) |
+        ((u.u & 0x00000000000000FFULL) << 56);
+
+    return u.d;
+}
+
+static inline float glue(ldflr, MEMSUFFIX) (target_ulong EA)
+{
+    union {
+        float f;
+        uint32_t u;
+    } u;
+
+    u.f = glue(ldfl, MEMSUFFIX)(EA);
+    u.u = ((u.u & 0xFF000000UL) >> 24) |
+        ((u.u & 0x00FF0000ULL) >> 8) |
+        ((u.u & 0x0000FF00UL) << 8) |
+        ((u.u & 0x000000FFULL) << 24);
+
+    return u.f;
+}
+
+PPC_LDF_OP(fd_le, ldfqr);
+PPC_LDF_OP(fs_le, ldflr);
+
 /* Load and set reservation */
 PPC_OP(glue(lwarx, MEMSUFFIX))
 {
@@ -145,6 +286,17 @@ PPC_OP(glue(lwarx, MEMSUFFIX))
     RETURN();
 }
 
+PPC_OP(glue(lwarx_le, MEMSUFFIX))
+{
+    if (T0 & 0x03) {
+        do_raise_exception(EXCP_ALIGN);
+    } else {
+       T1 = glue(ld32r, MEMSUFFIX)(T0);
+       regs->reserve = T0;
+    }
+    RETURN();
+}
+
 /* Store with reservation */
 PPC_OP(glue(stwcx, MEMSUFFIX))
 {
@@ -162,6 +314,22 @@ PPC_OP(glue(stwcx, MEMSUFFIX))
     RETURN();
 }
 
+PPC_OP(glue(stwcx_le, MEMSUFFIX))
+{
+    if (T0 & 0x03) {
+        do_raise_exception(EXCP_ALIGN);
+    } else {
+        if (regs->reserve != T0) {
+            env->crf[0] = xer_ov;
+        } else {
+            glue(st32r, MEMSUFFIX)(T0, T1);
+            env->crf[0] = xer_ov | 0x02;
+        }
+    }
+    regs->reserve = 0;
+    RETURN();
+}
+
 PPC_OP(glue(dcbz, MEMSUFFIX))
 {
     glue(stl, MEMSUFFIX)(T0 + 0x00, 0);
@@ -188,4 +356,16 @@ PPC_OP(glue(ecowx, MEMSUFFIX))
     RETURN();
 }
 
+PPC_OP(glue(eciwx_le, MEMSUFFIX))
+{
+    T1 = glue(ld32r, MEMSUFFIX)(T0);
+    RETURN();
+}
+
+PPC_OP(glue(ecowx_le, MEMSUFFIX))
+{
+    glue(st32r, MEMSUFFIX)(T0, T1);
+    RETURN();
+}
+
 #undef MEMSUFFIX
Index: target-ppc/translate.c
===================================================================
RCS file: /cvsroot/qemu/qemu/target-ppc/translate.c,v
retrieving revision 1.28
diff -u -d -w -B -b -d -p -r1.28 translate.c
--- target-ppc/translate.c	13 Mar 2005 17:01:22 -0000	1.28
+++ target-ppc/translate.c	18 Apr 2005 08:23:20 -0000
@@ -1040,22 +1046,41 @@ GEN_HANDLER(mtfsfi, 0x3F, 0x06, 0x04, 0x
 }
 
 /***                             Integer load                              ***/
+#define op_ldst(name)        (*gen_op_##name[ctx->mem_idx])()
 #if defined(CONFIG_USER_ONLY)
-#define op_ldst(name)        gen_op_##name##_raw()
-#define OP_LD_TABLE(width)
-#define OP_ST_TABLE(width)
+#define OP_LD_TABLE(width)                                                    \
+static GenOpFunc *gen_op_l##width[] = {                                       \
+    &gen_op_l##width##_raw,                                                   \
+    &gen_op_l##width##_le_raw,                                                \
+};
+#define OP_ST_TABLE(width)                                                    \
+static GenOpFunc *gen_op_st##width[] = {                                      \
+    &gen_op_st##width##_raw,                                                  \
+    &gen_op_st##width##_le_raw,                                               \
+};
+/* Byte access routines are endian safe */
+#define gen_op_stb_le_raw gen_op_stb_raw
+#define gen_op_lbz_le_raw gen_op_lbz_raw
 #else
-#define op_ldst(name)        (*gen_op_##name[ctx->mem_idx])()
 #define OP_LD_TABLE(width)                                                    \
 static GenOpFunc *gen_op_l##width[] = {                                       \
     &gen_op_l##width##_user,                                                  \
+    &gen_op_l##width##_le_user,                                               \
     &gen_op_l##width##_kernel,                                                \
-}
+    &gen_op_l##width##_le_kernel,                                             \
+};
 #define OP_ST_TABLE(width)                                                    \
 static GenOpFunc *gen_op_st##width[] = {                                      \
     &gen_op_st##width##_user,                                                 \
+    &gen_op_st##width##_le_user,                                              \
     &gen_op_st##width##_kernel,                                               \
-}
+    &gen_op_st##width##_le_kernel,                                            \
+};
+/* Byte access routines are endian safe */
+#define gen_op_stb_le_user gen_op_stb_user
+#define gen_op_lbz_le_user gen_op_lbz_user
+#define gen_op_stb_le_kernel gen_op_stb_kernel
+#define gen_op_lbz_le_kernel gen_op_lbz_kernel
 #endif
 
 #define GEN_LD(width, opc)                                                    \
@@ -1226,17 +1251,28 @@ OP_ST_TABLE(wbr);
 GEN_STX(wbr, 0x16, 0x14);
 
 /***                    Integer load and store multiple                    ***/
+#define op_ldstm(name, reg) (*gen_op_##name[ctx->mem_idx])(reg)
 #if defined(CONFIG_USER_ONLY)
-#define op_ldstm(name, reg) gen_op_##name##_raw(reg)
+static GenOpFunc1 *gen_op_lmw[] = {
+    &gen_op_lmw_raw,
+    &gen_op_lmw_le_raw,
+};
+static GenOpFunc1 *gen_op_stmw[] = {
+    &gen_op_stmw_raw,
+    &gen_op_stmw_le_raw,
+};
 #else
-#define op_ldstm(name, reg) (*gen_op_##name[ctx->mem_idx])(reg)
 static GenOpFunc1 *gen_op_lmw[] = {
     &gen_op_lmw_user,
+    &gen_op_lmw_le_user,
     &gen_op_lmw_kernel,
+    &gen_op_lmw_le_kernel,
 };
 static GenOpFunc1 *gen_op_stmw[] = {
     &gen_op_stmw_user,
+    &gen_op_stmw_le_user,
     &gen_op_stmw_kernel,
+    &gen_op_stmw_le_kernel,
 };
 #endif
 
@@ -1271,23 +1307,39 @@ GEN_HANDLER(stmw, 0x2F, 0xFF, 0xFF, 0x00
 }
 
 /***                    Integer load and store strings                     ***/
-#if defined(CONFIG_USER_ONLY)
-#define op_ldsts(name, start) gen_op_##name##_raw(start)
-#define op_ldstsx(name, rd, ra, rb) gen_op_##name##_raw(rd, ra, rb)
-#else
 #define op_ldsts(name, start) (*gen_op_##name[ctx->mem_idx])(start)
 #define op_ldstsx(name, rd, ra, rb) (*gen_op_##name[ctx->mem_idx])(rd, ra, rb)
+#if defined(CONFIG_USER_ONLY)
+static GenOpFunc1 *gen_op_lswi[] = {
+    &gen_op_lswi_raw,
+    &gen_op_lswi_le_raw,
+};
+static GenOpFunc3 *gen_op_lswx[] = {
+    &gen_op_lswx_raw,
+    &gen_op_lswx_le_raw,
+};
+static GenOpFunc1 *gen_op_stsw[] = {
+    &gen_op_stsw_raw,
+    &gen_op_stsw_le_raw,
+};
+#else
 static GenOpFunc1 *gen_op_lswi[] = {
     &gen_op_lswi_user,
+    &gen_op_lswi_le_user,
     &gen_op_lswi_kernel,
+    &gen_op_lswi_le_kernel,
 };
 static GenOpFunc3 *gen_op_lswx[] = {
     &gen_op_lswx_user,
+    &gen_op_lswx_le_user,
     &gen_op_lswx_kernel,
+    &gen_op_lswx_le_kernel,
 };
 static GenOpFunc1 *gen_op_stsw[] = {
     &gen_op_stsw_user,
+    &gen_op_stsw_le_user,
     &gen_op_stsw_kernel,
+    &gen_op_stsw_le_kernel,
 };
 #endif
 
@@ -1383,23 +1435,33 @@ GEN_HANDLER(isync, 0x13, 0x16, 0xFF, 0x0
 {
 }
 
-/* lwarx */
+#define op_lwarx() (*gen_op_lwarx[ctx->mem_idx])()
+#define op_stwcx() (*gen_op_stwcx[ctx->mem_idx])()
 #if defined(CONFIG_USER_ONLY)
-#define op_lwarx() gen_op_lwarx_raw()
-#define op_stwcx() gen_op_stwcx_raw()
+static GenOpFunc *gen_op_lwarx[] = {
+    &gen_op_lwarx_raw,
+    &gen_op_lwarx_le_raw,
+};
+static GenOpFunc *gen_op_stwcx[] = {
+    &gen_op_stwcx_raw,
+    &gen_op_stwcx_le_raw,
+};
 #else
-#define op_lwarx() (*gen_op_lwarx[ctx->mem_idx])()
 static GenOpFunc *gen_op_lwarx[] = {
     &gen_op_lwarx_user,
+    &gen_op_lwarx_le_user,
     &gen_op_lwarx_kernel,
+    &gen_op_lwarx_le_kernel,
 };
-#define op_stwcx() (*gen_op_stwcx[ctx->mem_idx])()
 static GenOpFunc *gen_op_stwcx[] = {
     &gen_op_stwcx_user,
+    &gen_op_stwcx_le_user,
     &gen_op_stwcx_kernel,
+    &gen_op_stwcx_le_kernel,
 };
 #endif
 
+/* lwarx */
 GEN_HANDLER(lwarx, 0x1F, 0x14, 0xFF, 0x00000001, PPC_RES)
 {
     if (rA(ctx->opcode) == 0) {
@@ -2492,23 +2554,33 @@ GEN_HANDLER(tlbsync, 0x1F, 0x16, 0x11, 0
 
 /***                              External control                         ***/
 /* Optional: */
-/* eciwx */
-#if defined(CONFIG_USER_ONLY)
-#define op_eciwx() gen_op_eciwx_raw()
-#define op_ecowx() gen_op_ecowx_raw()
-#else
 #define op_eciwx() (*gen_op_eciwx[ctx->mem_idx])()
 #define op_ecowx() (*gen_op_ecowx[ctx->mem_idx])()
+#if defined(CONFIG_USER_ONLY)
+static GenOpFunc *gen_op_eciwx[] = {
+    &gen_op_eciwx_raw,
+    &gen_op_eciwx_le_raw,
+};
+static GenOpFunc *gen_op_ecowx[] = {
+    &gen_op_ecowx_raw,
+    &gen_op_ecowx_le_raw,
+};
+#else
 static GenOpFunc *gen_op_eciwx[] = {
     &gen_op_eciwx_user,
+    &gen_op_eciwx_le_user,
     &gen_op_eciwx_kernel,
+    &gen_op_eciwx_le_kernel,
 };
 static GenOpFunc *gen_op_ecowx[] = {
     &gen_op_ecowx_user,
+    &gen_op_ecowx_le_user,
     &gen_op_ecowx_kernel,
+    &gen_op_ecowx_le_kernel,
 };
 #endif
 
+/* eciwx */
 GEN_HANDLER(eciwx, 0x1F, 0x16, 0x0D, 0x00000001, PPC_EXTERN)
 {
     /* Should check EAR[E] & alignment ! */
@@ -3137,10 +3209,10 @@ int gen_intermediate_code_internal (CPUS
     ctx.tb = tb;
     ctx.exception = EXCP_NONE;
 #if defined(CONFIG_USER_ONLY)
-    ctx.mem_idx = 0;
+    ctx.mem_idx = msr_le;
 #else
     ctx.supervisor = 1 - msr_pr;
-    ctx.mem_idx = 1 - msr_pr;
+    ctx.mem_idx = ((1 - msr_pr) << 1) | msr_le;
 #endif
     ctx.fpu_enabled = msr_fp;
 #if defined (DO_SINGLE_STEP)
@@ -3167,11 +3239,17 @@ int gen_intermediate_code_internal (CPUS
         }
 #endif
         ctx.opcode = ldl_code(ctx.nip);
+        if (msr_le) {
+            ctx.opcode = ((ctx.opcode & 0xFF000000) >> 24) |
+                ((ctx.opcode & 0x00FF0000) >> 8) |
+                ((ctx.opcode & 0x0000FF00) << 8) |
+                ((ctx.opcode & 0x000000FF) << 24);
+        }
 #if defined PPC_DEBUG_DISAS
         if (loglevel & CPU_LOG_TB_IN_ASM) {
-            fprintf(logfile, "translate opcode %08x (%02x %02x %02x)\n",
+            fprintf(logfile, "translate opcode %08x (%02x %02x %02x) (%s)\n",
                     ctx.opcode, opc1(ctx.opcode), opc2(ctx.opcode),
-                    opc3(ctx.opcode));
+                    opc3(ctx.opcode), msr_le ? "little" : "big");
         }
 #endif
         ctx.nip += 4;

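A note on how the per-mode op tables above are indexed: ctx.mem_idx now
encodes both the privilege level and the endianness, so each table has
four entries with softmmu (user, le_user, kernel, le_kernel) and two in
user-only builds (raw, le_raw). A simplified sketch of the selection,
with hypothetical names standing in for the generated ops:

/* Sketch only; the real entries are generated (gen_op_*_user etc.). */
typedef void (*gen_op_fn)(void);

static void lwz_user(void)      { /* big-endian load, user mode */ }
static void lwz_le_user(void)   { /* little-endian load, user mode */ }
static void lwz_kernel(void)    { /* big-endian load, kernel mode */ }
static void lwz_le_kernel(void) { /* little-endian load, kernel mode */ }

static gen_op_fn gen_op_lwz_tbl[4] = {
    lwz_user,      /* mem_idx 0: msr_pr = 1, msr_le = 0 */
    lwz_le_user,   /* mem_idx 1: msr_pr = 1, msr_le = 1 */
    lwz_kernel,    /* mem_idx 2: msr_pr = 0, msr_le = 0 */
    lwz_le_kernel, /* mem_idx 3: msr_pr = 0, msr_le = 1 */
};

/* mem_idx = ((1 - msr_pr) << 1) | msr_le, so op_ldst(lwz) expands to
 * (*gen_op_lwz_tbl[mem_idx])() and picks the right variant. */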

* Re: [Qemu-devel] PPC little-endian mode support
From: art yerkes @ 2005-04-22 21:25 UTC (permalink / raw)
  To: qemu-devel

On Mon, 18 Apr 2005 10:36:38 +0200
"J. Mayer" <l_indien@magic.fr> wrote:

> This patch adds little-endian mode support to the PPC emulation.
> It is needed by OS/2, Windows NT, and some programs such as VirtualPC.
> The patch has been tested using the OS/2 bootloader (thanks to Tero
> Kaarlela).

Thanks for doing little-endian support.

-- 
Here's a simple experiment. Stand on a train track between two locomotives
which are pushing on you with equal force in opposite directions. You will
exhibit no net motion. None the less, you may soon begin to notice that
something important is happening.
-- Robert Stirniman
