From: "J. Mayer" <l_indien@magic.fr>
To: qemu-devel@nongnu.org
Subject: [Qemu-devel] RFC: reverse-endian softmmu memory accessors
Date: Sat, 13 Oct 2007 11:56:12 +0200 [thread overview]
Message-ID: <1192269372.9976.305.camel@rapid> (raw)
[-- Attachment #1: Type: text/plain, Size: 2319 bytes --]
The problem:
some CPU architectures, namely PowerPC and maybe others, offer
facilities to access memory or I/O in the reverse endianness, i.e.
little-endian instead of big-endian for PowerPC, or provide instructions
to make memory accesses in the "reverse-endian". This is implemented as
a global flag on some CPUs. This case is already handled by the PowerPC
emulation but it is far from being optimal. Some other implementations
allow the OS to store a "reverse-endian" flag in the TLB or the segment
descriptors, thus providing per-page or per-segment endianness control.
This is mostly used to ease driver migration from a PC platform to
PowerPC without taking any care of the device endianness in the driver
code (yes, this is bad...).
Proposal:
here's a patch that implements "reverse-endian" low-level memory
accessors. It also provides an IO_MEM_REVERSE flag for TLBs. This flag is
handled in the I/O case of the softmmu low-level routines, which means that
it does not slow down "native-endian" memory accesses and only adds a
one-bit test on the "native-endian" I/O access, which should not be a
problem as I/O accesses are already slower, being handled via a
callback. As a side effect this patch allows me to delete large parts of
the target-ppc/op_mem.h and target-ppc/op_helper_mem.h as it makes
little-endian memory accessors directly available. And the translated
code for all little-endian access also becomes smaller, which has even a
visible effect on the mean translated block size (as reported by the
"info jit" monitor command) as lwbrx and lhbrx, which do memory
accesses with byteswap, are widely used in PowerPC code.
Warning:
this patch is to be taken as a proof of concept, for now. It works and
does not bring any visible regression to the PowerPC emulation but may
be bugged somewhere and generate conflicts if applied against the
CPU_MMU_INDEX patch. It is very invasive in the PowerPC target code but
needs just a few adds for other targets. It also brings quite a lot of
changes in the softmmu headers but is supposed not to change the
"native-endian" paths (or it's a bug).
One will also notice that I also added "reverse-endian" byte access
routines. Those are, in fact, not needed but I left them in just for
consistency.
Please comment.
--
J. Mayer <l_indien@magic.fr>
Never organized
[-- Attachment #2: softmmu_reverse_endian.diff --]
[-- Type: text/x-patch, Size: 87071 bytes --]
Index: cpu-all.h
===================================================================
RCS file: /sources/qemu/qemu/cpu-all.h,v
retrieving revision 1.76
diff -u -d -d -p -r1.76 cpu-all.h
--- cpu-all.h 23 Sep 2007 15:28:03 -0000 1.76
+++ cpu-all.h 12 Oct 2007 07:14:43 -0000
@@ -161,7 +161,7 @@ typedef union {
*
* endian is:
* (empty): target cpu endianness or 8 bit access
- * r : reversed target cpu endianness (not implemented yet)
+ * r : reversed target cpu endianness
* be : big endian (not implemented yet)
* le : little endian (not implemented yet)
*
@@ -557,6 +557,7 @@ static inline void stfq_be_p(void *ptr,
/* target CPU memory access functions */
#if defined(TARGET_WORDS_BIGENDIAN)
+/* native-endian */
#define lduw_p(p) lduw_be_p(p)
#define ldsw_p(p) ldsw_be_p(p)
#define ldl_p(p) ldl_be_p(p)
@@ -568,7 +569,20 @@ static inline void stfq_be_p(void *ptr,
#define stq_p(p, v) stq_be_p(p, v)
#define stfl_p(p, v) stfl_be_p(p, v)
#define stfq_p(p, v) stfq_be_p(p, v)
+/* reverse-endian */
+#define lduwr_p(p) lduw_le_p(p)
+#define ldswr_p(p) ldsw_le_p(p)
+#define ldlr_p(p) ldl_le_p(p)
+#define ldqr_p(p) ldq_le_p(p)
+#define ldflr_p(p) ldfl_le_p(p)
+#define ldfqr_p(p) ldfq_le_p(p)
+#define stwr_p(p, v) stw_le_p(p, v)
+#define stlr_p(p, v) stl_le_p(p, v)
+#define stqr_p(p, v) stq_le_p(p, v)
+#define stflr_p(p, v) stfl_le_p(p, v)
+#define stfqr_p(p, v) stfq_le_p(p, v)
#else
+/* native-endian */
#define lduw_p(p) lduw_le_p(p)
#define ldsw_p(p) ldsw_le_p(p)
#define ldl_p(p) ldl_le_p(p)
@@ -580,6 +594,18 @@ static inline void stfq_be_p(void *ptr,
#define stq_p(p, v) stq_le_p(p, v)
#define stfl_p(p, v) stfl_le_p(p, v)
#define stfq_p(p, v) stfq_le_p(p, v)
+/* reverse-endian */
+#define lduwr_p(p) lduw_be_p(p)
+#define ldswr_p(p) ldsw_be_p(p)
+#define ldlr_p(p) ldl_be_p(p)
+#define ldqr_p(p) ldq_be_p(p)
+#define ldflr_p(p) ldfl_be_p(p)
+#define ldfqr_p(p) ldfq_be_p(p)
+#define stwr_p(p, v) stw_be_p(p, v)
+#define stlr_p(p, v) stl_be_p(p, v)
+#define stqr_p(p, v) stq_be_p(p, v)
+#define stflr_p(p, v) stfl_be_p(p, v)
+#define stfqr_p(p, v) stfq_be_p(p, v)
#endif
/* MMU memory access macros */
@@ -605,6 +631,7 @@ static inline void stfq_be_p(void *ptr,
#define laddr(x) (uint8_t *)(long)(x)
#endif
+/* native-endian */
#define ldub_raw(p) ldub_p(laddr((p)))
#define ldsb_raw(p) ldsb_p(laddr((p)))
#define lduw_raw(p) lduw_p(laddr((p)))
@@ -619,11 +646,26 @@ static inline void stfq_be_p(void *ptr,
#define stq_raw(p, v) stq_p(saddr((p)), v)
#define stfl_raw(p, v) stfl_p(saddr((p)), v)
#define stfq_raw(p, v) stfq_p(saddr((p)), v)
-
+/* reverse endian */
+#define ldubr_raw(p) ldub_p(laddr((p)))
+#define ldsbr_raw(p) ldsb_p(laddr((p)))
+#define lduwr_raw(p) lduwr_p(laddr((p)))
+#define ldswr_raw(p) ldswr_p(laddr((p)))
+#define ldlr_raw(p) ldlr_p(laddr((p)))
+#define ldqr_raw(p) ldqr_p(laddr((p)))
+#define ldflr_raw(p) ldflr_p(laddr((p)))
+#define ldfqr_raw(p) ldfqr_p(laddr((p)))
+#define stbr_raw(p, v) stb_p(saddr((p)), v)
+#define stwr_raw(p, v) stwr_p(saddr((p)), v)
+#define stlr_raw(p, v) stlr_p(saddr((p)), v)
+#define stqr_raw(p, v) stqr_p(saddr((p)), v)
+#define stflr_raw(p, v) stflr_p(saddr((p)), v)
+#define stfqr_raw(p, v) stfqr_p(saddr((p)), v)
#if defined(CONFIG_USER_ONLY)
/* if user mode, no other memory access functions */
+/* native-endian */
#define ldub(p) ldub_raw(p)
#define ldsb(p) ldsb_raw(p)
#define lduw(p) lduw_raw(p)
@@ -638,14 +680,38 @@ static inline void stfq_be_p(void *ptr,
#define stq(p, v) stq_raw(p, v)
#define stfl(p, v) stfl_raw(p, v)
#define stfq(p, v) stfq_raw(p, v)
+/* reverse-endian */
+#define ldubr(p) ldub_raw(p)
+#define ldsbr(p) ldsb_raw(p)
+#define lduwr(p) lduwr_raw(p)
+#define ldswr(p) ldswr_raw(p)
+#define ldlr(p) ldlr_raw(p)
+#define ldqr(p) ldqr_raw(p)
+#define ldflr(p) ldflr_raw(p)
+#define ldfqr(p) ldfqr_raw(p)
+#define stbr(p, v) stb_raw(p, v)
+#define stwr(p, v) stwr_raw(p, v)
+#define stlr(p, v) stlr_raw(p, v)
+#define stqr(p, v) stqr_raw(p, v)
+#define stflr(p, v) stflr_raw(p, v)
+#define stfqr(p, v) stfqr_raw(p, v)
+/* native-endian */
#define ldub_code(p) ldub_raw(p)
#define ldsb_code(p) ldsb_raw(p)
#define lduw_code(p) lduw_raw(p)
#define ldsw_code(p) ldsw_raw(p)
#define ldl_code(p) ldl_raw(p)
#define ldq_code(p) ldq_raw(p)
+/* reverse-endian */
+#define ldubr_code(p) ldub_raw(p)
+#define ldsbr_code(p) ldsb_raw(p)
+#define lduwr_code(p) lduwr_raw(p)
+#define ldswr_code(p) ldswr_raw(p)
+#define ldlr_code(p) ldlr_raw(p)
+#define ldqr_code(p) ldqr_raw(p)
+/* native-endian */
#define ldub_kernel(p) ldub_raw(p)
#define ldsb_kernel(p) ldsb_raw(p)
#define lduw_kernel(p) lduw_raw(p)
@@ -660,6 +726,21 @@ static inline void stfq_be_p(void *ptr,
#define stq_kernel(p, v) stq_raw(p, v)
#define stfl_kernel(p, v) stfl_raw(p, v)
#define stfq_kernel(p, vt) stfq_raw(p, v)
+/* reverse-endian */
+#define ldubr_kernel(p) ldub_raw(p)
+#define ldsbr_kernel(p) ldsb_raw(p)
+#define lduwr_kernel(p) lduwr_raw(p)
+#define ldswr_kernel(p) ldswr_raw(p)
+#define ldlr_kernel(p) ldlr_raw(p)
+#define ldqr_kernel(p) ldqr_raw(p)
+#define ldflr_kernel(p) ldflr_raw(p)
+#define ldfqr_kernel(p) ldfqr_raw(p)
+#define stbr_kernel(p, v) stbr_raw(p, v)
+#define stwr_kernel(p, v) stwr_raw(p, v)
+#define stlr_kernel(p, v) stlr_raw(p, v)
+#define stqr_kernel(p, v) stqr_raw(p, v)
+#define stflr_kernel(p, v) stflr_raw(p, v)
+#define stfqr_kernel(p, v) stfqr_raw(p, v)
#endif /* defined(CONFIG_USER_ONLY) */
@@ -790,6 +871,8 @@ extern uint8_t *phys_ram_dirty;
the physical address */
#define IO_MEM_ROMD (1)
#define IO_MEM_SUBPAGE (2)
+/* On some target CPUs, endianness is stored in page tables */
+#define IO_MEM_REVERSE (3)
typedef void CPUWriteMemoryFunc(void *opaque, target_phys_addr_t addr, uint32_t value);
typedef uint32_t CPUReadMemoryFunc(void *opaque, target_phys_addr_t addr);
Index: exec-all.h
===================================================================
RCS file: /sources/qemu/qemu/exec-all.h,v
retrieving revision 1.67
diff -u -d -d -p -r1.67 exec-all.h
--- exec-all.h 8 Oct 2007 13:16:14 -0000 1.67
+++ exec-all.h 12 Oct 2007 07:14:43 -0000
@@ -562,6 +567,7 @@ extern int tb_invalidated_flag;
#define MEMSUFFIX _code
#define env cpu_single_env
+/* native-endian */
#define DATA_SIZE 1
#include "softmmu_header.h"
@@ -581,6 +587,21 @@ void tlb_fill(target_ulong addr, int is_
#define DATA_SIZE 8
#include "softmmu_header.h"
+/* reverse-endian */
+#define REVERSE_ENDIAN
+#define DATA_SIZE 1
+#include "softmmu_header.h"
+
+#define DATA_SIZE 2
+#include "softmmu_header.h"
+
+#define DATA_SIZE 4
+#include "softmmu_header.h"
+
+#define DATA_SIZE 8
+#include "softmmu_header.h"
+#undef REVERSE_ENDIAN
+
#undef ACCESS_TYPE
#undef MEMSUFFIX
#undef env
Index: exec.c
===================================================================
RCS file: /sources/qemu/qemu/exec.c,v
retrieving revision 1.108
diff -u -d -d -p -r1.108 exec.c
--- exec.c 8 Oct 2007 13:16:14 -0000 1.108
+++ exec.c 12 Oct 2007 07:14:43 -0000
@@ -2507,7 +2507,7 @@ void cpu_physical_memory_rw(target_phys_
uint8_t *ptr;
uint32_t val;
target_phys_addr_t page;
- unsigned long pd;
+ unsigned long pd, addr1;
PhysPageDesc *p;
while (len > 0) {
@@ -2524,31 +2524,54 @@ void cpu_physical_memory_rw(target_phys_
if (is_write) {
if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
- io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
- /* XXX: could force cpu_single_env to NULL to avoid
- potential bugs */
- if (l >= 4 && ((addr & 3) == 0)) {
- /* 32 bit write access */
- val = ldl_p(buf);
- io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
- l = 4;
- } else if (l >= 2 && ((addr & 1) == 0)) {
- /* 16 bit write access */
- val = lduw_p(buf);
- io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
- l = 2;
+ if (pd & IO_MEM_REVERSE) {
+ /* Specific case for reverse endian page write */
+ addr1 = (pd & TARGET_PAGE_MASK) +
+ (addr & ~TARGET_PAGE_MASK);
+ ptr = phys_ram_base + addr1;
+ for (; l >= 4; l -= 4) {
+ stlr_p(ptr, *(uint32_t *)buf);
+ ptr += 4;
+ buf += 4;
+ }
+ for (; l >= 2; l -= 2) {
+ stwr_p(ptr, *(uint16_t *)buf);
+ ptr += 2;
+ buf += 2;
+ }
+ if (l >= 1)
+ *ptr = *buf;
+ goto invalidate_code;
} else {
- /* 8 bit write access */
- val = ldub_p(buf);
- io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
- l = 1;
+ io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
+ /* XXX: could force cpu_single_env to NULL to avoid
+ potential bugs */
+ if (l >= 4 && ((addr & 3) == 0)) {
+ /* 32 bit write access */
+ val = ldl_p(buf);
+ io_mem_write[io_index][2](io_mem_opaque[io_index],
+ addr, val);
+ l = 4;
+ } else if (l >= 2 && ((addr & 1) == 0)) {
+ /* 16 bit write access */
+ val = lduw_p(buf);
+ io_mem_write[io_index][1](io_mem_opaque[io_index],
+ addr, val);
+ l = 2;
+ } else {
+ /* 8 bit write access */
+ val = ldub_p(buf);
+ io_mem_write[io_index][0](io_mem_opaque[io_index],
+ addr, val);
+ l = 1;
+ }
}
} else {
- unsigned long addr1;
addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
/* RAM case */
ptr = phys_ram_base + addr1;
memcpy(ptr, buf, l);
+ invalidate_code:
if (!cpu_physical_memory_is_dirty(addr1)) {
/* invalidate code */
tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
@@ -2560,23 +2583,45 @@ void cpu_physical_memory_rw(target_phys_
} else {
if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
!(pd & IO_MEM_ROMD)) {
- /* I/O case */
- io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
- if (l >= 4 && ((addr & 3) == 0)) {
- /* 32 bit read access */
- val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
- stl_p(buf, val);
- l = 4;
- } else if (l >= 2 && ((addr & 1) == 0)) {
- /* 16 bit read access */
- val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
- stw_p(buf, val);
- l = 2;
+ if (pd & IO_MEM_REVERSE) {
+ /* Specific case for reverse endian page read */
+ addr1 = (pd & TARGET_PAGE_MASK) +
+ (addr & ~TARGET_PAGE_MASK);
+ ptr = phys_ram_base + addr1;
+ for (; l >= 4; l -= 4) {
+ *(uint32_t *)buf = ldlr_p(ptr);
+ ptr += 4;
+ buf += 4;
+ }
+ for (; l >= 2; l -= 2) {
+ *(uint16_t *)buf = lduwr_p(ptr);
+ ptr += 2;
+ buf += 2;
+ }
+ if (l >= 1)
+ *buf = *ptr;
} else {
- /* 8 bit read access */
- val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
- stb_p(buf, val);
- l = 1;
+ /* I/O case */
+ io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
+ if (l >= 4 && ((addr & 3) == 0)) {
+ /* 32 bit read access */
+ val = io_mem_read[io_index][2](io_mem_opaque[io_index],
+ addr);
+ stl_p(buf, val);
+ l = 4;
+ } else if (l >= 2 && ((addr & 1) == 0)) {
+ /* 16 bit read access */
+ val = io_mem_read[io_index][1](io_mem_opaque[io_index],
+ addr);
+ stw_p(buf, val);
+ l = 2;
+ } else {
+ /* 8 bit read access */
+ val = io_mem_read[io_index][0](io_mem_opaque[io_index],
+ addr);
+ stb_p(buf, val);
+ l = 1;
+ }
}
} else {
/* RAM case */
@@ -2907,6 +2952,21 @@ void dump_exec_info(FILE *f,
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS
+/* Native-endian */
+#define SHIFT 0
+#include "softmmu_template.h"
+
+#define SHIFT 1
+#include "softmmu_template.h"
+
+#define SHIFT 2
+#include "softmmu_template.h"
+
+#define SHIFT 3
+#include "softmmu_template.h"
+
+/* Reverse-endian */
+#define REVERSE_ENDIAN
#define SHIFT 0
#include "softmmu_template.h"
@@ -2918,6 +2978,7 @@ void dump_exec_info(FILE *f,
#define SHIFT 3
#include "softmmu_template.h"
+#undef REVERSE_ENDIAN
#undef env
Index: softmmu_header.h
===================================================================
RCS file: /sources/qemu/qemu/softmmu_header.h,v
retrieving revision 1.17
diff -u -d -d -p -r1.17 softmmu_header.h
--- softmmu_header.h 8 Oct 2007 13:16:14 -0000 1.17
+++ softmmu_header.h 12 Oct 2007 07:14:43 -0000
@@ -17,6 +17,9 @@
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+
+#if !defined(REVERSE_ENDIAN)
+/* native-endian */
#if DATA_SIZE == 8
#define SUFFIX q
#define USUFFIX q
@@ -38,7 +41,31 @@
#else
#error unsupported data size
#endif
+#else /* !defined(REVERSE_ENDIAN) */
+/* reverse-endian */
+#if DATA_SIZE == 8
+#define SUFFIX qr
+#define USUFFIX qr
+#define DATA_TYPE uint64_t
+#elif DATA_SIZE == 4
+#define SUFFIX lr
+#define USUFFIX lr
+#define DATA_TYPE uint32_t
+#elif DATA_SIZE == 2
+#define SUFFIX wr
+#define USUFFIX uwr
+#define DATA_TYPE uint16_t
+#define DATA_STYPE int16_t
+#elif DATA_SIZE == 1
+#define SUFFIX br
+#define USUFFIX ubr
+#define DATA_TYPE uint8_t
+#define DATA_STYPE int8_t
+#else
+#error unsupported data size
+#endif
+#endif /* defined(REVERSE_ENDIAN) */
#if ACCESS_TYPE == 0
#define CPU_MEM_INDEX 0
@@ -322,7 +302,8 @@ static inline void glue(glue(st, SUFFIX)
#endif /* !asm */
#if ACCESS_TYPE != 3
+#if !defined(REVERSE_ENDIAN)
#if DATA_SIZE == 8
static inline float64 glue(ldfq, MEMSUFFIX)(target_ulong ptr)
{
@@ -386,6 +367,54 @@ static inline void glue(stfl, MEMSUFFIX)
}
#endif /* DATA_SIZE == 4 */
+#else /* defined(REVERSE_ENDIAN) */
+
+#if DATA_SIZE == 8
+static inline float64 glue(ldfqr, MEMSUFFIX)(target_ulong ptr)
+{
+ union {
+ float64 d;
+ uint64_t i;
+ } u;
+ u.i = glue(ldqr, MEMSUFFIX)(ptr);
+ return u.d;
+}
+
+static inline void glue(stfqr, MEMSUFFIX)(target_ulong ptr, float64 v)
+{
+ union {
+ float64 d;
+ uint64_t i;
+ } u;
+ u.d = v;
+ glue(stqr, MEMSUFFIX)(ptr, u.i);
+}
+#endif /* DATA_SIZE == 8 */
+
+#if DATA_SIZE == 4
+static inline float32 glue(ldflr, MEMSUFFIX)(target_ulong ptr)
+{
+ union {
+ float32 f;
+ uint32_t i;
+ } u;
+ u.i = glue(ldlr, MEMSUFFIX)(ptr);
+ return u.f;
+}
+
+static inline void glue(stflr, MEMSUFFIX)(target_ulong ptr, float32 v)
+{
+ union {
+ float32 f;
+ uint32_t i;
+ } u;
+ u.f = v;
+ glue(stlr, MEMSUFFIX)(ptr, u.i);
+}
+#endif /* DATA_SIZE == 4 */
+
+#endif /* defined(REVERSE_ENDIAN) */
+
#endif /* ACCESS_TYPE != 3 */
#undef RES_TYPE
Index: softmmu_template.h
===================================================================
RCS file: /sources/qemu/qemu/softmmu_template.h,v
retrieving revision 1.18
diff -u -d -d -p -r1.18 softmmu_template.h
--- softmmu_template.h 17 Sep 2007 08:09:45 -0000 1.18
+++ softmmu_template.h 12 Oct 2007 07:14:43 -0000
@@ -19,25 +19,66 @@
*/
#define DATA_SIZE (1 << SHIFT)
+#if !defined(REVERSE_ENDIAN)
+/* native-endian */
#if DATA_SIZE == 8
#define SUFFIX q
#define USUFFIX q
+#define RSUFFIX qr
+#define URSUFFIX qr
#define DATA_TYPE uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define USUFFIX l
+#define RSUFFIX lr
+#define URSUFFIX lr
#define DATA_TYPE uint32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define USUFFIX uw
+#define RSUFFIX wr
+#define URSUFFIX uwr
#define DATA_TYPE uint16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define USUFFIX ub
+#define RSUFFIX br
+#define URSUFFIX ubr
+#define DATA_TYPE uint8_t
+#else
+#error unsupported data size
+#endif
+#else /* !defined(REVERSE_ENDIAN) */
+/* reverse-endian */
+#if DATA_SIZE == 8
+#define SUFFIX qr
+#define USUFFIX qr
+#define RSUFFIX q
+#define URSUFFIX q
+#define DATA_TYPE uint64_t
+#elif DATA_SIZE == 4
+#define SUFFIX lr
+#define USUFFIX lr
+#define RSUFFIX l
+#define URSUFFIX l
+#define DATA_TYPE uint32_t
+#elif DATA_SIZE == 2
+#define SUFFIX wr
+#define USUFFIX uwr
+#define RSUFFIX w
+#define URSUFFIX uw
+#define DATA_TYPE uint16_t
+#elif DATA_SIZE == 1
+#define SUFFIX br
+#define USUFFIX ubr
+#define RSUFFIX b
+#define URSUFFIX ub
#define DATA_TYPE uint8_t
#else
#error unsupported data size
#endif
+#endif /* defined(REVERSE_ENDIAN) */
+
#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE 2
@@ -47,6 +88,24 @@
#define ADDR_READ addr_read
#endif
+#if (defined(TARGET_WORDS_BIGENDIAN) && !defined(REVERSE_ENDIAN)) || \
+ (!defined(TARGET_WORDS_BIGENDIAN) && defined(REVERSE_ENDIAN))
+#define ACCESS_WORDS_BIGENDIAN
+#endif
+
+/* Beware: we do not have reverse-endian accessors for IOs */
+#if defined(REVERSE_ENDIAN)
+#if SHIFT == 1
+#define IOSWAP(val) bswap16(val)
+#elif SHIFT >= 2
+#define IOSWAP(val) bswap32(val)
+#else
+#define IOSWAP(val) (val)
+#endif
+#else
+#define IOSWAP(val) (val)
+#endif
+
static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
int is_user,
void *retaddr);
@@ -59,13 +118,16 @@ static inline DATA_TYPE glue(io_read, SU
index = (tlb_addr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#if SHIFT <= 2
res = io_mem_read[index][SHIFT](io_mem_opaque[index], physaddr);
+ res = IOSWAP(res);
#else
-#ifdef TARGET_WORDS_BIGENDIAN
- res = (uint64_t)io_mem_read[index][2](io_mem_opaque[index], physaddr) << 32;
- res |= io_mem_read[index][2](io_mem_opaque[index], physaddr + 4);
+#ifdef ACCESS_WORDS_BIGENDIAN
+ res = (uint64_t)IOSWAP(io_mem_read[index][2](io_mem_opaque[index],
+ physaddr)) << 32;
+ res |= IOSWAP(io_mem_read[index][2](io_mem_opaque[index], physaddr + 4));
#else
- res = io_mem_read[index][2](io_mem_opaque[index], physaddr);
- res |= (uint64_t)io_mem_read[index][2](io_mem_opaque[index], physaddr + 4) << 32;
+ res = IOSWAP(io_mem_read[index][2](io_mem_opaque[index], physaddr));
+ res |= (uint64_t)IOSWAP(io_mem_read[index][2](io_mem_opaque[index],
+ physaddr + 4)) << 32;
#endif
#endif /* SHIFT > 2 */
#ifdef USE_KQEMU
@@ -88,9 +150,33 @@ DATA_TYPE REGPARM(1) glue(glue(__ld, SUF
if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
physaddr = addr + env->tlb_table[is_user][index].addend;
if (tlb_addr & ~TARGET_PAGE_MASK) {
- /* IO access */
- if ((addr & (DATA_SIZE - 1)) != 0)
- goto do_unaligned_access;
- res = glue(io_read, SUFFIX)(physaddr, tlb_addr);
+ if (tlb_addr & IO_MEM_REVERSE) {
+ /* Specific case for reverse endian page read */
+ if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >=
+ TARGET_PAGE_SIZE) {
+ /* slow unaligned access (it spans two pages or IO) */
+ retaddr = GETPC();
+#ifdef ALIGNED_ONLY
+ do_unaligned_access(addr, READ_ACCESS_TYPE,
+ is_user, retaddr);
+#endif
+ res = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr, is_user,
+ retaddr);
+ } else {
+#ifdef ALIGNED_ONLY
+ if ((addr & (DATA_SIZE - 1)) != 0) {
+ retaddr = GETPC();
+ do_unaligned_access(addr, READ_ACCESS_TYPE,
+ is_user, retaddr);
+ }
+#endif
+ res = glue(glue(ld, URSUFFIX), _raw)((uint8_t *)(long)physaddr);
+ }
+ } else {
+ /* IO access */
+ if ((addr & (DATA_SIZE - 1)) != 0)
+ goto do_unaligned_access;
+ res = glue(io_read, SUFFIX)(physaddr, tlb_addr);
+ }
} else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
/* slow unaligned access (it spans two pages or IO) */
@@ -140,9 +226,37 @@ static DATA_TYPE glue(glue(slow_ld, SUFF
if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
physaddr = addr + env->tlb_table[is_user][index].addend;
if (tlb_addr & ~TARGET_PAGE_MASK) {
- /* IO access */
- if ((addr & (DATA_SIZE - 1)) != 0)
- goto do_unaligned_access;
- res = glue(io_read, SUFFIX)(physaddr, tlb_addr);
+ if (tlb_addr & IO_MEM_REVERSE) {
+ /* Specific case for reverse endian page read */
+ if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >=
+ TARGET_PAGE_SIZE) {
+ /* slow unaligned access (it spans two pages) */
+ addr1 = addr & ~(DATA_SIZE - 1);
+ addr2 = addr1 + DATA_SIZE;
+ res1 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr1,
+ is_user,
+ retaddr);
+ res2 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr2,
+ is_user,
+ retaddr);
+ shift = (addr & (DATA_SIZE - 1)) * 8;
+#ifdef ACCESS_WORDS_BIGENDIAN
+ res = (res1 >> shift) |
+ (res2 << ((DATA_SIZE * 8) - shift));
+#else
+ res = (res1 << shift) |
+ (res2 >> ((DATA_SIZE * 8) - shift));
+#endif
+ res = (DATA_TYPE)res;
+ } else {
+ /* unaligned/aligned access in the same page */
+ res = glue(glue(ld, URSUFFIX), _raw)((uint8_t *)(long)physaddr);
+ }
+ } else {
+ /* IO access */
+ if ((addr & (DATA_SIZE - 1)) != 0)
+ goto do_unaligned_access;
+ res = glue(io_read, SUFFIX)(physaddr, tlb_addr);
+ }
} else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
do_unaligned_access:
@@ -194,14 +308,16 @@ static inline void glue(io_write, SUFFIX
env->mem_write_vaddr = tlb_addr;
env->mem_write_pc = (unsigned long)retaddr;
#if SHIFT <= 2
+ val = IOSWAP(val);
io_mem_write[index][SHIFT](io_mem_opaque[index], physaddr, val);
#else
-#ifdef TARGET_WORDS_BIGENDIAN
- io_mem_write[index][2](io_mem_opaque[index], physaddr, val >> 32);
- io_mem_write[index][2](io_mem_opaque[index], physaddr + 4, val);
+#ifdef ACCESS_WORDS_BIGENDIAN
+ io_mem_write[index][2](io_mem_opaque[index], physaddr, IOSWAP(val >> 32));
+ io_mem_write[index][2](io_mem_opaque[index], physaddr + 4, IOSWAP(val));
#else
- io_mem_write[index][2](io_mem_opaque[index], physaddr, val);
- io_mem_write[index][2](io_mem_opaque[index], physaddr + 4, val >> 32);
+ io_mem_write[index][2](io_mem_opaque[index], physaddr, IOSWAP(val));
+ io_mem_write[index][2](io_mem_opaque[index], physaddr + 4,
+ IOSWAP(val >> 32));
#endif
#endif /* SHIFT > 2 */
#ifdef USE_KQEMU
@@ -220,12 +336,36 @@ void REGPARM(2) glue(glue(__st, SUFFIX),
if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
physaddr = addr + env->tlb_table[is_user][index].addend;
if (tlb_addr & ~TARGET_PAGE_MASK) {
- /* IO access */
- if ((addr & (DATA_SIZE - 1)) != 0)
- goto do_unaligned_access;
- retaddr = GETPC();
- glue(io_write, SUFFIX)(physaddr, val, tlb_addr, retaddr);
+ if (tlb_addr & IO_MEM_REVERSE) {
+ /* Specific case for reverse endian page write */
+ if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >=
+ TARGET_PAGE_SIZE) {
+ /* slow unaligned access (it spans two pages or IO) */
+ retaddr = GETPC();
+#ifdef ALIGNED_ONLY
+ do_unaligned_access(addr, 1, is_user, retaddr);
+#endif
+ glue(glue(slow_st, SUFFIX), MMUSUFFIX)(addr, val,
+ is_user, retaddr);
+ } else {
+#ifdef ALIGNED_ONLY
+ if ((addr & (DATA_SIZE - 1)) != 0) {
+ retaddr = GETPC();
+ do_unaligned_access(addr, 1, is_user, retaddr);
+ }
+#endif
+ glue(glue(st, RSUFFIX), _raw)((uint8_t *)(long)physaddr,
+ val);
+ }
+ } else {
+ /* IO access */
+ if ((addr & (DATA_SIZE - 1)) != 0)
+ goto do_unaligned_access;
+ retaddr = GETPC();
+ glue(io_write, SUFFIX)(physaddr, val, tlb_addr, retaddr);
+ }
} else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
+ /* slow unaligned access (it spans two pages or IO) */
do_unaligned_access:
retaddr = GETPC();
#ifdef ALIGNED_ONLY
@@ -271,15 +411,39 @@ static void glue(glue(slow_st, SUFFIX),
if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
physaddr = addr + env->tlb_table[is_user][index].addend;
if (tlb_addr & ~TARGET_PAGE_MASK) {
- /* IO access */
- if ((addr & (DATA_SIZE - 1)) != 0)
- goto do_unaligned_access;
- glue(io_write, SUFFIX)(physaddr, val, tlb_addr, retaddr);
+ if (tlb_addr & IO_MEM_REVERSE) {
+ /* Specific case for reverse endian page write */
+ if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >=
+ TARGET_PAGE_SIZE) {
+ /* slow unaligned access (it spans two pages or IO) */
+ /* XXX: not efficient, but simple */
+ for(i = 0;i < DATA_SIZE; i++) {
+#ifdef ACCESS_WORDS_BIGENDIAN
+ glue(slow_stb, MMUSUFFIX)(addr + i, val >> (i * 8),
+ is_user, retaddr);
+#else
+ glue(slow_stb, MMUSUFFIX)(addr + i,
+ val >> (((DATA_SIZE - 1) * 8)
+ - (i * 8)),
+ is_user, retaddr);
+#endif
+ }
+ } else {
+ /* aligned/unaligned access in the same page */
+ glue(glue(st, RSUFFIX), _raw)((uint8_t *)(long)physaddr,
+ val);
+ }
+ } else {
+ /* IO access */
+ if ((addr & (DATA_SIZE - 1)) != 0)
+ goto do_unaligned_access;
+ glue(io_write, SUFFIX)(physaddr, val, tlb_addr, retaddr);
+ }
} else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
do_unaligned_access:
/* XXX: not efficient, but simple */
for(i = 0;i < DATA_SIZE; i++) {
-#ifdef TARGET_WORDS_BIGENDIAN
+#ifdef ACCESS_WORDS_BIGENDIAN
glue(slow_stb, MMUSUFFIX)(addr + i, val >> (((DATA_SIZE - 1) * 8) - (i * 8)),
is_user, retaddr);
#else
@@ -297,10 +461,14 @@ static void glue(glue(slow_st, SUFFIX),
#endif /* !defined(SOFTMMU_CODE_ACCESS) */
+#undef IOSWAP
+#undef ACCESS_WORDS_BIGENDIAN
#undef READ_ACCESS_TYPE
#undef SHIFT
#undef DATA_TYPE
#undef SUFFIX
#undef USUFFIX
+#undef RSUFFIX
+#undef URSUFFIX
#undef DATA_SIZE
#undef ADDR_READ
Index: target-alpha/exec.h
===================================================================
RCS file: /sources/qemu/qemu/target-alpha/exec.h,v
retrieving revision 1.3
diff -u -d -d -p -r1.3 exec.h
--- target-alpha/exec.h 16 Sep 2007 21:08:01 -0000 1.3
+++ target-alpha/exec.h 12 Oct 2007 07:14:46 -0000
@@ -62,6 +62,9 @@ register uint64_t T2 asm(AREG3);
#if !defined(CONFIG_USER_ONLY)
#include "softmmu_exec.h"
+#define REVERSE_ENDIAN
+#include "softmmu_exec.h"
+#undef REVERSE_ENDIAN
#endif /* !defined(CONFIG_USER_ONLY) */
static inline void env_to_regs(void)
Index: target-alpha/op_helper.c
===================================================================
RCS file: /sources/qemu/qemu/target-alpha/op_helper.c,v
retrieving revision 1.2
diff -u -d -d -p -r1.2 op_helper.c
--- target-alpha/op_helper.c 16 Sep 2007 21:08:01 -0000 1.2
+++ target-alpha/op_helper.c 12 Oct 2007 07:14:46 -0000
@@ -1207,6 +1207,21 @@ void helper_st_phys_to_virt (void)
#define MMUSUFFIX _mmu
+/* Native-endian */
+#define SHIFT 0
+#include "softmmu_template.h"
+
+#define SHIFT 1
+#include "softmmu_template.h"
+
+#define SHIFT 2
+#include "softmmu_template.h"
+
+#define SHIFT 3
+#include "softmmu_template.h"
+
+/* Reverse-endian */
+#define REVERSE_ENDIAN
#define SHIFT 0
#include "softmmu_template.h"
@@ -1218,6 +1233,7 @@ void helper_st_phys_to_virt (void)
#define SHIFT 3
#include "softmmu_template.h"
+#undef REVERSE_ENDIAN
/* try to fill the TLB and return an exception if error. If retaddr is
NULL, it means that the function was called in C code (i.e. not
Index: target-arm/exec.h
===================================================================
RCS file: /sources/qemu/qemu/target-arm/exec.h,v
retrieving revision 1.13
diff -u -d -d -p -r1.13 exec.h
--- target-arm/exec.h 16 Sep 2007 21:08:01 -0000 1.13
+++ target-arm/exec.h 12 Oct 2007 07:14:47 -0000
@@ -64,6 +64,9 @@ static inline int cpu_halted(CPUState *e
#if !defined(CONFIG_USER_ONLY)
#include "softmmu_exec.h"
+#define REVERSE_ENDIAN
+#include "softmmu_exec.h"
+#undef REVERSE_ENDIAN
#endif
/* In op_helper.c */
Index: target-arm/op_helper.c
===================================================================
RCS file: /sources/qemu/qemu/target-arm/op_helper.c,v
retrieving revision 1.6
diff -u -d -d -p -r1.6 op_helper.c
--- target-arm/op_helper.c 16 Sep 2007 21:08:02 -0000 1.6
+++ target-arm/op_helper.c 12 Oct 2007 07:14:47 -0000
@@ -180,6 +180,21 @@ void do_vfp_get_fpscr(void)
#define MMUSUFFIX _mmu
#define GETPC() (__builtin_return_address(0))
+/* Native-endian */
+#define SHIFT 0
+#include "softmmu_template.h"
+
+#define SHIFT 1
+#include "softmmu_template.h"
+
+#define SHIFT 2
+#include "softmmu_template.h"
+
+#define SHIFT 3
+#include "softmmu_template.h"
+
+/* Reverse-endian */
+#define REVERSE_ENDIAN
#define SHIFT 0
#include "softmmu_template.h"
@@ -191,6 +206,7 @@ void do_vfp_get_fpscr(void)
#define SHIFT 3
#include "softmmu_template.h"
+#undef REVERSE_ENDIAN
/* try to fill the TLB and return an exception if error. If retaddr is
NULL, it means that the function was called in C code (i.e. not
Index: target-cris/exec.h
===================================================================
RCS file: /sources/qemu/qemu/target-cris/exec.h,v
retrieving revision 1.1
diff -u -d -d -p -r1.1 exec.h
--- target-cris/exec.h 8 Oct 2007 13:04:02 -0000 1.1
+++ target-cris/exec.h 12 Oct 2007 07:14:47 -0000
@@ -45,6 +45,9 @@ static inline void regs_to_env(void)
#if !defined(CONFIG_USER_ONLY)
#include "softmmu_exec.h"
+#define REVERSE_ENDIAN
+#include "softmmu_exec.h"
+#undef REVERSE_ENDIAN
#endif
void cpu_cris_flush_flags(CPUCRISState *env, int cc_op);
Index: target-cris/op_helper.c
===================================================================
RCS file: /sources/qemu/qemu/target-cris/op_helper.c,v
retrieving revision 1.1
diff -u -d -d -p -r1.1 op_helper.c
--- target-cris/op_helper.c 8 Oct 2007 13:04:02 -0000 1.1
+++ target-cris/op_helper.c 12 Oct 2007 07:14:47 -0000
@@ -25,6 +25,21 @@
#define MMUSUFFIX _mmu
#define GETPC() (__builtin_return_address(0))
+/* Native-endian */
+#define SHIFT 0
+#include "softmmu_template.h"
+
+#define SHIFT 1
+#include "softmmu_template.h"
+
+#define SHIFT 2
+#include "softmmu_template.h"
+
+#define SHIFT 3
+#include "softmmu_template.h"
+
+/* Reverse-endian */
+#define REVERSE_ENDIAN
#define SHIFT 0
#include "softmmu_template.h"
@@ -36,6 +51,7 @@
#define SHIFT 3
#include "softmmu_template.h"
+#undef REVERSE_ENDIAN
/* Try to fill the TLB and return an exception if error. If retaddr is
NULL, it means that the function was called in C code (i.e. not
Index: target-i386/exec.h
===================================================================
RCS file: /sources/qemu/qemu/target-i386/exec.h,v
retrieving revision 1.37
diff -u -d -d -p -r1.37 exec.h
--- target-i386/exec.h 23 Sep 2007 15:28:04 -0000 1.37
+++ target-i386/exec.h 12 Oct 2007 07:14:47 -0000
@@ -217,6 +217,9 @@ void check_iol_DX(void);
#if !defined(CONFIG_USER_ONLY)
#include "softmmu_exec.h"
+#define REVERSE_ENDIAN
+#include "softmmu_exec.h"
+#undef REVERSE_ENDIAN
static inline double ldfq(target_ulong ptr)
{
Index: target-i386/helper.c
===================================================================
RCS file: /sources/qemu/qemu/target-i386/helper.c,v
retrieving revision 1.89
diff -u -d -d -p -r1.89 helper.c
--- target-i386/helper.c 27 Sep 2007 01:52:00 -0000 1.89
+++ target-i386/helper.c 12 Oct 2007 07:14:47 -0000
@@ -3867,6 +3867,21 @@ void update_fp_status(void)
#define MMUSUFFIX _mmu
#define GETPC() (__builtin_return_address(0))
+/* Native-endian */
+#define SHIFT 0
+#include "softmmu_template.h"
+
+#define SHIFT 1
+#include "softmmu_template.h"
+
+#define SHIFT 2
+#include "softmmu_template.h"
+
+#define SHIFT 3
+#include "softmmu_template.h"
+
+/* Reverse-endian */
+#define REVERSE_ENDIAN
#define SHIFT 0
#include "softmmu_template.h"
@@ -3878,6 +3893,7 @@ void update_fp_status(void)
#define SHIFT 3
#include "softmmu_template.h"
+#undef REVERSE_ENDIAN
#endif
Index: target-m68k/exec.h
===================================================================
RCS file: /sources/qemu/qemu/target-m68k/exec.h,v
retrieving revision 1.4
diff -u -d -d -p -r1.4 exec.h
--- target-m68k/exec.h 16 Sep 2007 21:08:03 -0000 1.4
+++ target-m68k/exec.h 12 Oct 2007 07:14:47 -0000
@@ -38,6 +38,9 @@ static inline void regs_to_env(void)
#if !defined(CONFIG_USER_ONLY)
#include "softmmu_exec.h"
+#define REVERSE_ENDIAN
+#include "softmmu_exec.h"
+#undef REVERSE_ENDIAN
#endif
void cpu_m68k_flush_flags(CPUM68KState *env, int cc_op);
Index: target-m68k/op_helper.c
===================================================================
RCS file: /sources/qemu/qemu/target-m68k/op_helper.c,v
retrieving revision 1.6
diff -u -d -d -p -r1.6 op_helper.c
--- target-m68k/op_helper.c 16 Sep 2007 21:08:03 -0000 1.6
+++ target-m68k/op_helper.c 12 Oct 2007 07:14:47 -0000
@@ -33,6 +33,21 @@ extern int semihosting_enabled;
#define MMUSUFFIX _mmu
#define GETPC() (__builtin_return_address(0))
+/* Native-endian */
+#define SHIFT 0
+#include "softmmu_template.h"
+
+#define SHIFT 1
+#include "softmmu_template.h"
+
+#define SHIFT 2
+#include "softmmu_template.h"
+
+#define SHIFT 3
+#include "softmmu_template.h"
+
+/* Reverse-endian */
+#define REVERSE_ENDIAN
#define SHIFT 0
#include "softmmu_template.h"
@@ -44,6 +59,7 @@ extern int semihosting_enabled;
#define SHIFT 3
#include "softmmu_template.h"
+#undef REVERSE_ENDIAN
/* Try to fill the TLB and return an exception if error. If retaddr is
NULL, it means that the function was called in C code (i.e. not
Index: target-mips/exec.h
===================================================================
RCS file: /sources/qemu/qemu/target-mips/exec.h,v
retrieving revision 1.38
diff -u -d -d -p -r1.38 exec.h
--- target-mips/exec.h 9 Oct 2007 03:39:58 -0000 1.38
+++ target-mips/exec.h 12 Oct 2007 07:14:48 -0000
@@ -54,6 +54,9 @@ register target_ulong T2 asm(AREG3);
#if !defined(CONFIG_USER_ONLY)
#include "softmmu_exec.h"
+#define REVERSE_ENDIAN
+#include "softmmu_exec.h"
+#undef REVERSE_ENDIAN
#endif /* !defined(CONFIG_USER_ONLY) */
#if defined(TARGET_MIPSN32) || defined(TARGET_MIPS64)
Index: target-mips/op_helper.c
===================================================================
RCS file: /sources/qemu/qemu/target-mips/op_helper.c,v
retrieving revision 1.65
diff -u -d -d -p -r1.65 op_helper.c
--- target-mips/op_helper.c 9 Oct 2007 03:39:58 -0000 1.65
+++ target-mips/op_helper.c 12 Oct 2007 07:14:48 -0000
@@ -544,6 +544,21 @@ static void do_unaligned_access (target_
#define MMUSUFFIX _mmu
#define ALIGNED_ONLY
+/* Native-endian */
+#define SHIFT 0
+#include "softmmu_template.h"
+
+#define SHIFT 1
+#include "softmmu_template.h"
+
+#define SHIFT 2
+#include "softmmu_template.h"
+
+#define SHIFT 3
+#include "softmmu_template.h"
+
+/* Reverse-endian */
+#define REVERSE_ENDIAN
#define SHIFT 0
#include "softmmu_template.h"
@@ -555,6 +570,7 @@ static void do_unaligned_access (target_
#define SHIFT 3
#include "softmmu_template.h"
+#undef REVERSE_ENDIAN
static void do_unaligned_access (target_ulong addr, int is_write, int is_user, void *retaddr)
{
Index: target-ppc/exec.h
===================================================================
RCS file: /sources/qemu/qemu/target-ppc/exec.h,v
retrieving revision 1.28
diff -u -d -d -p -r1.28 exec.h
--- target-ppc/exec.h 7 Oct 2007 18:19:25 -0000 1.28
+++ target-ppc/exec.h 12 Oct 2007 07:14:48 -0000
@@ -91,7 +91,12 @@ static always_inline target_ulong rotl64
#endif
#if !defined(CONFIG_USER_ONLY)
+
#include "softmmu_exec.h"
+#define REVERSE_ENDIAN
+#include "softmmu_exec.h"
+#undef REVERSE_ENDIAN
+
#endif /* !defined(CONFIG_USER_ONLY) */
void do_raise_exception_err (uint32_t exception, int error_code);
Index: target-ppc/op_helper.c
===================================================================
RCS file: /sources/qemu/qemu/target-ppc/op_helper.c,v
retrieving revision 1.49
diff -u -d -d -p -r1.49 op_helper.c
--- target-ppc/op_helper.c 7 Oct 2007 17:13:43 -0000 1.49
+++ target-ppc/op_helper.c 12 Oct 2007 07:14:49 -0000
@@ -2291,6 +2301,7 @@ DO_SPE_OP1(fsctuf);
#define MMUSUFFIX _mmu
#define GETPC() (__builtin_return_address(0))
+/* Native-endian */
#define SHIFT 0
#include "softmmu_template.h"
@@ -2303,6 +2314,21 @@ DO_SPE_OP1(fsctuf);
#define SHIFT 3
#include "softmmu_template.h"
+/* Reverse-endian */
+#define REVERSE_ENDIAN
+#define SHIFT 0
+#include "softmmu_template.h"
+
+#define SHIFT 1
+#include "softmmu_template.h"
+
+#define SHIFT 2
+#include "softmmu_template.h"
+
+#define SHIFT 3
+#include "softmmu_template.h"
+#undef REVERSE_ENDIAN
+
/* try to fill the TLB and return an exception if error. If retaddr is
NULL, it means that the function was called in C code (i.e. not
from generated code or from helper.c) */
Index: target-ppc/op_helper.h
===================================================================
RCS file: /sources/qemu/qemu/target-ppc/op_helper.h,v
retrieving revision 1.21
diff -u -d -d -p -r1.21 op_helper.h
--- target-ppc/op_helper.h 7 Oct 2007 17:13:44 -0000 1.21
+++ target-ppc/op_helper.h 12 Oct 2007 07:14:49 -0000
@@ -37,19 +37,6 @@ void glue(do_POWER2_lfq_le, MEMSUFFIX) (
void glue(do_POWER2_stfq, MEMSUFFIX) (void);
void glue(do_POWER2_stfq_le, MEMSUFFIX) (void);
-#if defined(TARGET_PPC64)
-void glue(do_lsw_64, MEMSUFFIX) (int dst);
-void glue(do_lsw_le_64, MEMSUFFIX) (int dst);
-void glue(do_stsw_64, MEMSUFFIX) (int src);
-void glue(do_stsw_le_64, MEMSUFFIX) (int src);
-void glue(do_lmw_64, MEMSUFFIX) (int dst);
-void glue(do_lmw_le_64, MEMSUFFIX) (int dst);
-void glue(do_stmw_64, MEMSUFFIX) (int src);
-void glue(do_stmw_le_64, MEMSUFFIX) (int src);
-void glue(do_icbi_64, MEMSUFFIX) (void);
-void glue(do_dcbz_64, MEMSUFFIX) (void);
-#endif
-
#else
void do_print_mem_EA (target_ulong EA);
Index: target-ppc/op_helper_mem.h
===================================================================
RCS file: /sources/qemu/qemu/target-ppc/op_helper_mem.h,v
retrieving revision 1.14
diff -u -d -d -p -r1.14 op_helper_mem.h
--- target-ppc/op_helper_mem.h 7 Oct 2007 17:13:44 -0000 1.14
+++ target-ppc/op_helper_mem.h 12 Oct 2007 07:14:49 -0000
@@ -19,85 +19,33 @@
*/
/* Multiple word / string load and store */
-static always_inline target_ulong glue(ld32r, MEMSUFFIX) (target_ulong EA)
-{
- uint32_t tmp = glue(ldl, MEMSUFFIX)(EA);
- return ((tmp & 0xFF000000UL) >> 24) | ((tmp & 0x00FF0000UL) >> 8) |
- ((tmp & 0x0000FF00UL) << 8) | ((tmp & 0x000000FFUL) << 24);
-}
-
-static always_inline void glue(st32r, MEMSUFFIX) (target_ulong EA,
- target_ulong data)
-{
- uint32_t tmp =
- ((data & 0xFF000000UL) >> 24) | ((data & 0x00FF0000UL) >> 8) |
- ((data & 0x0000FF00UL) << 8) | ((data & 0x000000FFUL) << 24);
- glue(stl, MEMSUFFIX)(EA, tmp);
-}
-
void glue(do_lmw, MEMSUFFIX) (int dst)
{
for (; dst < 32; dst++, T0 += 4) {
- env->gpr[dst] = glue(ldl, MEMSUFFIX)((uint32_t)T0);
- }
-}
-
-#if defined(TARGET_PPC64)
-void glue(do_lmw_64, MEMSUFFIX) (int dst)
-{
- for (; dst < 32; dst++, T0 += 4) {
- env->gpr[dst] = glue(ldl, MEMSUFFIX)((uint64_t)T0);
+ env->gpr[dst] = glue(ldl, MEMSUFFIX)(T0);
}
}
-#endif
void glue(do_stmw, MEMSUFFIX) (int src)
{
for (; src < 32; src++, T0 += 4) {
- glue(stl, MEMSUFFIX)((uint32_t)T0, env->gpr[src]);
- }
-}
-
-#if defined(TARGET_PPC64)
-void glue(do_stmw_64, MEMSUFFIX) (int src)
-{
- for (; src < 32; src++, T0 += 4) {
- glue(stl, MEMSUFFIX)((uint64_t)T0, env->gpr[src]);
+ glue(stl, MEMSUFFIX)(T0, env->gpr[src]);
}
}
-#endif
void glue(do_lmw_le, MEMSUFFIX) (int dst)
{
for (; dst < 32; dst++, T0 += 4) {
- env->gpr[dst] = glue(ld32r, MEMSUFFIX)((uint32_t)T0);
- }
-}
-
-#if defined(TARGET_PPC64)
-void glue(do_lmw_le_64, MEMSUFFIX) (int dst)
-{
- for (; dst < 32; dst++, T0 += 4) {
- env->gpr[dst] = glue(ld32r, MEMSUFFIX)((uint64_t)T0);
+ env->gpr[dst] = glue(ldlr, MEMSUFFIX)(T0);
}
}
-#endif
void glue(do_stmw_le, MEMSUFFIX) (int src)
{
for (; src < 32; src++, T0 += 4) {
- glue(st32r, MEMSUFFIX)((uint32_t)T0, env->gpr[src]);
- }
-}
-
-#if defined(TARGET_PPC64)
-void glue(do_stmw_le_64, MEMSUFFIX) (int src)
-{
- for (; src < 32; src++, T0 += 4) {
- glue(st32r, MEMSUFFIX)((uint64_t)T0, env->gpr[src]);
+ glue(stlr, MEMSUFFIX)(T0, env->gpr[src]);
}
}
-#endif
void glue(do_lsw, MEMSUFFIX) (int dst)
{
@@ -105,71 +53,33 @@ void glue(do_lsw, MEMSUFFIX) (int dst)
int sh;
for (; T1 > 3; T1 -= 4, T0 += 4) {
- env->gpr[dst++] = glue(ldl, MEMSUFFIX)((uint32_t)T0);
- if (unlikely(dst == 32))
- dst = 0;
- }
- if (unlikely(T1 != 0)) {
- tmp = 0;
- for (sh = 24; T1 > 0; T1--, T0++, sh -= 8) {
- tmp |= glue(ldub, MEMSUFFIX)((uint32_t)T0) << sh;
- }
- env->gpr[dst] = tmp;
- }
-}
-
-#if defined(TARGET_PPC64)
-void glue(do_lsw_64, MEMSUFFIX) (int dst)
-{
- uint32_t tmp;
- int sh;
-
- for (; T1 > 3; T1 -= 4, T0 += 4) {
- env->gpr[dst++] = glue(ldl, MEMSUFFIX)((uint64_t)T0);
+ env->gpr[dst++] = glue(ldl, MEMSUFFIX)(T0);
if (unlikely(dst == 32))
dst = 0;
}
if (unlikely(T1 != 0)) {
tmp = 0;
for (sh = 24; T1 > 0; T1--, T0++, sh -= 8) {
- tmp |= glue(ldub, MEMSUFFIX)((uint64_t)T0) << sh;
+ tmp |= glue(ldub, MEMSUFFIX)(T0) << sh;
}
env->gpr[dst] = tmp;
}
}
-#endif
void glue(do_stsw, MEMSUFFIX) (int src)
{
int sh;
for (; T1 > 3; T1 -= 4, T0 += 4) {
- glue(stl, MEMSUFFIX)((uint32_t)T0, env->gpr[src++]);
- if (unlikely(src == 32))
- src = 0;
- }
- if (unlikely(T1 != 0)) {
- for (sh = 24; T1 > 0; T1--, T0++, sh -= 8)
- glue(stb, MEMSUFFIX)((uint32_t)T0, (env->gpr[src] >> sh) & 0xFF);
- }
-}
-
-#if defined(TARGET_PPC64)
-void glue(do_stsw_64, MEMSUFFIX) (int src)
-{
- int sh;
-
- for (; T1 > 3; T1 -= 4, T0 += 4) {
- glue(stl, MEMSUFFIX)((uint64_t)T0, env->gpr[src++]);
+ glue(stl, MEMSUFFIX)(T0, env->gpr[src++]);
if (unlikely(src == 32))
src = 0;
}
if (unlikely(T1 != 0)) {
for (sh = 24; T1 > 0; T1--, T0++, sh -= 8)
- glue(stb, MEMSUFFIX)((uint64_t)T0, (env->gpr[src] >> sh) & 0xFF);
+ glue(stb, MEMSUFFIX)(T0, (env->gpr[src] >> sh) & 0xFF);
}
}
-#endif
void glue(do_lsw_le, MEMSUFFIX) (int dst)
{
@@ -177,71 +87,33 @@ void glue(do_lsw_le, MEMSUFFIX) (int dst
int sh;
for (; T1 > 3; T1 -= 4, T0 += 4) {
- env->gpr[dst++] = glue(ld32r, MEMSUFFIX)((uint32_t)T0);
- if (unlikely(dst == 32))
- dst = 0;
- }
- if (unlikely(T1 != 0)) {
- tmp = 0;
- for (sh = 0; T1 > 0; T1--, T0++, sh += 8) {
- tmp |= glue(ldub, MEMSUFFIX)((uint32_t)T0) << sh;
- }
- env->gpr[dst] = tmp;
- }
-}
-
-#if defined(TARGET_PPC64)
-void glue(do_lsw_le_64, MEMSUFFIX) (int dst)
-{
- uint32_t tmp;
- int sh;
-
- for (; T1 > 3; T1 -= 4, T0 += 4) {
- env->gpr[dst++] = glue(ld32r, MEMSUFFIX)((uint64_t)T0);
+ env->gpr[dst++] = glue(ldlr, MEMSUFFIX)(T0);
if (unlikely(dst == 32))
dst = 0;
}
if (unlikely(T1 != 0)) {
tmp = 0;
for (sh = 0; T1 > 0; T1--, T0++, sh += 8) {
- tmp |= glue(ldub, MEMSUFFIX)((uint64_t)T0) << sh;
+ tmp |= glue(ldub, MEMSUFFIX)(T0) << sh;
}
env->gpr[dst] = tmp;
}
}
-#endif
void glue(do_stsw_le, MEMSUFFIX) (int src)
{
int sh;
for (; T1 > 3; T1 -= 4, T0 += 4) {
- glue(st32r, MEMSUFFIX)((uint32_t)T0, env->gpr[src++]);
- if (unlikely(src == 32))
- src = 0;
- }
- if (unlikely(T1 != 0)) {
- for (sh = 0; T1 > 0; T1--, T0++, sh += 8)
- glue(stb, MEMSUFFIX)((uint32_t)T0, (env->gpr[src] >> sh) & 0xFF);
- }
-}
-
-#if defined(TARGET_PPC64)
-void glue(do_stsw_le_64, MEMSUFFIX) (int src)
-{
- int sh;
-
- for (; T1 > 3; T1 -= 4, T0 += 4) {
- glue(st32r, MEMSUFFIX)((uint64_t)T0, env->gpr[src++]);
+ glue(stlr, MEMSUFFIX)(T0, env->gpr[src++]);
if (unlikely(src == 32))
src = 0;
}
if (unlikely(T1 != 0)) {
for (sh = 0; T1 > 0; T1--, T0++, sh += 8)
- glue(stb, MEMSUFFIX)((uint64_t)T0, (env->gpr[src] >> sh) & 0xFF);
+ glue(stb, MEMSUFFIX)(T0, (env->gpr[src] >> sh) & 0xFF);
}
}
-#endif
/* Instruction cache invalidation helper */
void glue(do_icbi, MEMSUFFIX) (void)
@@ -252,27 +124,11 @@ void glue(do_icbi, MEMSUFFIX) (void)
* (not a fetch) by the MMU. To be sure it will be so,
* do the load "by hand".
*/
- tmp = glue(ldl, MEMSUFFIX)((uint32_t)T0);
- T0 &= ~(env->icache_line_size - 1);
- tb_invalidate_page_range((uint32_t)T0,
- (uint32_t)(T0 + env->icache_line_size));
-}
-
-#if defined(TARGET_PPC64)
-void glue(do_icbi_64, MEMSUFFIX) (void)
-{
- uint64_t tmp;
- /* Invalidate one cache line :
- * PowerPC specification says this is to be treated like a load
- * (not a fetch) by the MMU. To be sure it will be so,
- * do the load "by hand".
- */
- tmp = glue(ldq, MEMSUFFIX)((uint64_t)T0);
+ tmp = glue(ldl, MEMSUFFIX)(T0);
T0 &= ~(env->icache_line_size - 1);
- tb_invalidate_page_range((uint64_t)T0,
- (uint64_t)(T0 + env->icache_line_size));
+    /* We assume it would not wrap around 2^32 on 32-bit targets */
+ tb_invalidate_page_range(T0, T0 + env->icache_line_size);
}
-#endif
void glue(do_dcbz, MEMSUFFIX) (void)
{
@@ -281,90 +137,43 @@ void glue(do_dcbz, MEMSUFFIX) (void)
/* XXX: should be 970 specific (?) */
if (((env->spr[SPR_970_HID5] >> 7) & 0x3) == 1)
dcache_line_size = 32;
- glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x00), 0);
- glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x04), 0);
- glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x08), 0);
- glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x0C), 0);
- glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x10), 0);
- glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x14), 0);
- glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x18), 0);
- glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x1C), 0);
- if (dcache_line_size >= 64) {
- glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x20UL), 0);
- glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x24UL), 0);
- glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x28UL), 0);
- glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x2CUL), 0);
- glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x30UL), 0);
- glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x34UL), 0);
- glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x38UL), 0);
- glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x3CUL), 0);
- if (dcache_line_size >= 128) {
- glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x40UL), 0);
- glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x44UL), 0);
- glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x48UL), 0);
- glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x4CUL), 0);
- glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x50UL), 0);
- glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x54UL), 0);
- glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x58UL), 0);
- glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x5CUL), 0);
- glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x60UL), 0);
- glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x64UL), 0);
- glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x68UL), 0);
- glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x6CUL), 0);
- glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x70UL), 0);
- glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x74UL), 0);
- glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x78UL), 0);
- glue(stl, MEMSUFFIX)((uint32_t)(T0 + 0x7CUL), 0);
- }
- }
-}
-
-#if defined(TARGET_PPC64)
-void glue(do_dcbz_64, MEMSUFFIX) (void)
-{
- int dcache_line_size = env->dcache_line_size;
-
- /* XXX: should be 970 specific (?) */
- if (((env->spr[SPR_970_HID5] >> 6) & 0x3) == 0x2)
- dcache_line_size = 32;
- glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x00), 0);
- glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x04), 0);
- glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x08), 0);
- glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x0C), 0);
- glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x10), 0);
- glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x14), 0);
- glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x18), 0);
- glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x1C), 0);
+ glue(stl, MEMSUFFIX)(T0 + 0x00, 0);
+ glue(stl, MEMSUFFIX)(T0 + 0x04, 0);
+ glue(stl, MEMSUFFIX)(T0 + 0x08, 0);
+ glue(stl, MEMSUFFIX)(T0 + 0x0C, 0);
+ glue(stl, MEMSUFFIX)(T0 + 0x10, 0);
+ glue(stl, MEMSUFFIX)(T0 + 0x14, 0);
+ glue(stl, MEMSUFFIX)(T0 + 0x18, 0);
+ glue(stl, MEMSUFFIX)(T0 + 0x1C, 0);
if (dcache_line_size >= 64) {
- glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x20UL), 0);
- glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x24UL), 0);
- glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x28UL), 0);
- glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x2CUL), 0);
- glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x30UL), 0);
- glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x34UL), 0);
- glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x38UL), 0);
- glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x3CUL), 0);
+ glue(stl, MEMSUFFIX)(T0 + 0x20UL, 0);
+ glue(stl, MEMSUFFIX)(T0 + 0x24UL, 0);
+ glue(stl, MEMSUFFIX)(T0 + 0x28UL, 0);
+ glue(stl, MEMSUFFIX)(T0 + 0x2CUL, 0);
+ glue(stl, MEMSUFFIX)(T0 + 0x30UL, 0);
+ glue(stl, MEMSUFFIX)(T0 + 0x34UL, 0);
+ glue(stl, MEMSUFFIX)(T0 + 0x38UL, 0);
+ glue(stl, MEMSUFFIX)(T0 + 0x3CUL, 0);
if (dcache_line_size >= 128) {
- glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x40UL), 0);
- glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x44UL), 0);
- glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x48UL), 0);
- glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x4CUL), 0);
- glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x50UL), 0);
- glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x54UL), 0);
- glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x58UL), 0);
- glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x5CUL), 0);
- glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x60UL), 0);
- glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x64UL), 0);
- glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x68UL), 0);
- glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x6CUL), 0);
- glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x70UL), 0);
- glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x74UL), 0);
- glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x78UL), 0);
- glue(stl, MEMSUFFIX)((uint64_t)(T0 + 0x7CUL), 0);
+ glue(stl, MEMSUFFIX)(T0 + 0x40UL, 0);
+ glue(stl, MEMSUFFIX)(T0 + 0x44UL, 0);
+ glue(stl, MEMSUFFIX)(T0 + 0x48UL, 0);
+ glue(stl, MEMSUFFIX)(T0 + 0x4CUL, 0);
+ glue(stl, MEMSUFFIX)(T0 + 0x50UL, 0);
+ glue(stl, MEMSUFFIX)(T0 + 0x54UL, 0);
+ glue(stl, MEMSUFFIX)(T0 + 0x58UL, 0);
+ glue(stl, MEMSUFFIX)(T0 + 0x5CUL, 0);
+ glue(stl, MEMSUFFIX)(T0 + 0x60UL, 0);
+ glue(stl, MEMSUFFIX)(T0 + 0x64UL, 0);
+ glue(stl, MEMSUFFIX)(T0 + 0x68UL, 0);
+ glue(stl, MEMSUFFIX)(T0 + 0x6CUL, 0);
+ glue(stl, MEMSUFFIX)(T0 + 0x70UL, 0);
+ glue(stl, MEMSUFFIX)(T0 + 0x74UL, 0);
+ glue(stl, MEMSUFFIX)(T0 + 0x78UL, 0);
+ glue(stl, MEMSUFFIX)(T0 + 0x7CUL, 0);
}
}
}
-#endif
/* PowerPC 601 specific instructions (POWER bridge) */
// XXX: to be tested
@@ -400,26 +209,6 @@ void glue(do_POWER2_lfq, MEMSUFFIX) (voi
FT1 = glue(ldfq, MEMSUFFIX)((uint32_t)(T0 + 4));
}
-static always_inline double glue(ldfqr, MEMSUFFIX) (target_ulong EA)
-{
- union {
- double d;
- uint64_t u;
- } u;
-
- u.d = glue(ldfq, MEMSUFFIX)(EA);
- u.u = ((u.u & 0xFF00000000000000ULL) >> 56) |
- ((u.u & 0x00FF000000000000ULL) >> 40) |
- ((u.u & 0x0000FF0000000000ULL) >> 24) |
- ((u.u & 0x000000FF00000000ULL) >> 8) |
- ((u.u & 0x00000000FF000000ULL) << 8) |
- ((u.u & 0x0000000000FF0000ULL) << 24) |
- ((u.u & 0x000000000000FF00ULL) << 40) |
- ((u.u & 0x00000000000000FFULL) << 56);
-
- return u.d;
-}
-
void glue(do_POWER2_lfq_le, MEMSUFFIX) (void)
{
FT0 = glue(ldfqr, MEMSUFFIX)((uint32_t)(T0 + 4));
@@ -432,25 +221,6 @@ void glue(do_POWER2_stfq, MEMSUFFIX) (vo
glue(stfq, MEMSUFFIX)((uint32_t)(T0 + 4), FT1);
}
-static always_inline void glue(stfqr, MEMSUFFIX) (target_ulong EA, double d)
-{
- union {
- double d;
- uint64_t u;
- } u;
-
- u.d = d;
- u.u = ((u.u & 0xFF00000000000000ULL) >> 56) |
- ((u.u & 0x00FF000000000000ULL) >> 40) |
- ((u.u & 0x0000FF0000000000ULL) >> 24) |
- ((u.u & 0x000000FF00000000ULL) >> 8) |
- ((u.u & 0x00000000FF000000ULL) << 8) |
- ((u.u & 0x0000000000FF0000ULL) << 24) |
- ((u.u & 0x000000000000FF00ULL) << 40) |
- ((u.u & 0x00000000000000FFULL) << 56);
- glue(stfq, MEMSUFFIX)(EA, u.d);
-}
-
void glue(do_POWER2_stfq_le, MEMSUFFIX) (void)
{
glue(stfqr, MEMSUFFIX)((uint32_t)(T0 + 4), FT0);
Index: target-ppc/op_mem.h
===================================================================
RCS file: /sources/qemu/qemu/target-ppc/op_mem.h,v
retrieving revision 1.22
diff -u -d -d -p -r1.22 op_mem.h
--- target-ppc/op_mem.h 7 Oct 2007 18:19:25 -0000 1.22
+++ target-ppc/op_mem.h 12 Oct 2007 07:14:49 -0000
@@ -18,82 +18,15 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-static always_inline uint16_t glue(ld16r, MEMSUFFIX) (target_ulong EA)
-{
- uint16_t tmp = glue(lduw, MEMSUFFIX)(EA);
- return ((tmp & 0xFF00) >> 8) | ((tmp & 0x00FF) << 8);
-}
-
-static always_inline int32_t glue(ld16rs, MEMSUFFIX) (target_ulong EA)
-{
- int16_t tmp = glue(lduw, MEMSUFFIX)(EA);
- return (int16_t)((tmp & 0xFF00) >> 8) | ((tmp & 0x00FF) << 8);
-}
-
-static always_inline uint32_t glue(ld32r, MEMSUFFIX) (target_ulong EA)
-{
- uint32_t tmp = glue(ldl, MEMSUFFIX)(EA);
- return ((tmp & 0xFF000000) >> 24) | ((tmp & 0x00FF0000) >> 8) |
- ((tmp & 0x0000FF00) << 8) | ((tmp & 0x000000FF) << 24);
-}
-
-#if defined(TARGET_PPC64) || defined(TARGET_PPCEMB)
-static always_inline uint64_t glue(ld64r, MEMSUFFIX) (target_ulong EA)
-{
- uint64_t tmp = glue(ldq, MEMSUFFIX)(EA);
- return ((tmp & 0xFF00000000000000ULL) >> 56) |
- ((tmp & 0x00FF000000000000ULL) >> 40) |
- ((tmp & 0x0000FF0000000000ULL) >> 24) |
- ((tmp & 0x000000FF00000000ULL) >> 8) |
- ((tmp & 0x00000000FF000000ULL) << 8) |
- ((tmp & 0x0000000000FF0000ULL) << 24) |
- ((tmp & 0x000000000000FF00ULL) << 40) |
- ((tmp & 0x00000000000000FFULL) << 54);
-}
-#endif
-
#if defined(TARGET_PPC64)
static always_inline int64_t glue(ldsl, MEMSUFFIX) (target_ulong EA)
{
return (int32_t)glue(ldl, MEMSUFFIX)(EA);
}
-static always_inline int64_t glue(ld32rs, MEMSUFFIX) (target_ulong EA)
-{
- uint32_t tmp = glue(ldl, MEMSUFFIX)(EA);
- return (int32_t)((tmp & 0xFF000000) >> 24) | ((tmp & 0x00FF0000) >> 8) |
- ((tmp & 0x0000FF00) << 8) | ((tmp & 0x000000FF) << 24);
-}
-#endif
-
-static always_inline void glue(st16r, MEMSUFFIX) (target_ulong EA,
- uint16_t data)
-{
- uint16_t tmp = ((data & 0xFF00) >> 8) | ((data & 0x00FF) << 8);
- glue(stw, MEMSUFFIX)(EA, tmp);
-}
-
-static always_inline void glue(st32r, MEMSUFFIX) (target_ulong EA,
- uint32_t data)
-{
- uint32_t tmp = ((data & 0xFF000000) >> 24) | ((data & 0x00FF0000) >> 8) |
- ((data & 0x0000FF00) << 8) | ((data & 0x000000FF) << 24);
- glue(stl, MEMSUFFIX)(EA, tmp);
-}
-
-#if defined(TARGET_PPC64) || defined(TARGET_PPCEMB)
-static always_inline void glue(st64r, MEMSUFFIX) (target_ulong EA,
- uint64_t data)
+static always_inline int64_t glue(ldslr, MEMSUFFIX) (target_ulong EA)
{
- uint64_t tmp = ((data & 0xFF00000000000000ULL) >> 56) |
- ((data & 0x00FF000000000000ULL) >> 40) |
- ((data & 0x0000FF0000000000ULL) >> 24) |
- ((data & 0x000000FF00000000ULL) >> 8) |
- ((data & 0x00000000FF000000ULL) << 8) |
- ((data & 0x0000000000FF0000ULL) << 24) |
- ((data & 0x000000000000FF00ULL) << 40) |
- ((data & 0x00000000000000FFULL) << 56);
- glue(stq, MEMSUFFIX)(EA, tmp);
+ return (int32_t)glue(ldlr, MEMSUFFIX)(EA);
}
#endif
@@ -130,6 +63,7 @@ void OPPROTO glue(glue(glue(op_st, name)
}
#endif
+/* Native-endian fixed-point memory loads */
PPC_LD_OP(bz, ldub);
PPC_LD_OP(ha, ldsw);
PPC_LD_OP(hz, lduw);
@@ -145,20 +79,21 @@ PPC_LD_OP_64(hz, lduw);
PPC_LD_OP_64(wz, ldl);
#endif
-PPC_LD_OP(ha_le, ld16rs);
-PPC_LD_OP(hz_le, ld16r);
-PPC_LD_OP(wz_le, ld32r);
+/* Reverse-endian fixed-point memory loads */
+PPC_LD_OP(ha_le, ldswr);
+PPC_LD_OP(hz_le, lduwr);
+PPC_LD_OP(wz_le, ldlr);
#if defined(TARGET_PPC64)
-PPC_LD_OP(d_le, ld64r);
-PPC_LD_OP(wa_le, ld32rs);
-PPC_LD_OP_64(d_le, ld64r);
-PPC_LD_OP_64(wa_le, ld32rs);
-PPC_LD_OP_64(ha_le, ld16rs);
-PPC_LD_OP_64(hz_le, ld16r);
-PPC_LD_OP_64(wz_le, ld32r);
+PPC_LD_OP(d_le, ldqr);
+PPC_LD_OP(wa_le, ldslr);
+PPC_LD_OP_64(d_le, ldqr);
+PPC_LD_OP_64(wa_le, ldslr);
+PPC_LD_OP_64(ha_le, ldswr);
+PPC_LD_OP_64(hz_le, lduwr);
+PPC_LD_OP_64(wz_le, ldlr);
#endif
-/*** Integer store ***/
+/* Native-endian fixed-point memory stores */
PPC_ST_OP(b, stb);
PPC_ST_OP(h, stw);
PPC_ST_OP(w, stl);
@@ -170,27 +105,29 @@ PPC_ST_OP_64(h, stw);
PPC_ST_OP_64(w, stl);
#endif
-PPC_ST_OP(h_le, st16r);
-PPC_ST_OP(w_le, st32r);
+/* Reverse-endian fixed-point memory stores */
+PPC_ST_OP(h_le, stwr);
+PPC_ST_OP(w_le, stlr);
#if defined(TARGET_PPC64)
-PPC_ST_OP(d_le, st64r);
-PPC_ST_OP_64(d_le, st64r);
-PPC_ST_OP_64(h_le, st16r);
-PPC_ST_OP_64(w_le, st32r);
+PPC_ST_OP(d_le, stqr);
+PPC_ST_OP_64(d_le, stqr);
+PPC_ST_OP_64(h_le, stwr);
+PPC_ST_OP_64(w_le, stlr);
#endif
-/*** Integer load and store with byte reverse ***/
-PPC_LD_OP(hbr, ld16r);
-PPC_LD_OP(wbr, ld32r);
-PPC_ST_OP(hbr, st16r);
-PPC_ST_OP(wbr, st32r);
+/* Native-endian fixed-point loads and stores with byte-reverse */
+PPC_LD_OP(hbr, lduwr);
+PPC_LD_OP(wbr, ldlr);
+PPC_ST_OP(hbr, stwr);
+PPC_ST_OP(wbr, stlr);
#if defined(TARGET_PPC64)
-PPC_LD_OP_64(hbr, ld16r);
-PPC_LD_OP_64(wbr, ld32r);
-PPC_ST_OP_64(hbr, st16r);
-PPC_ST_OP_64(wbr, st32r);
+PPC_LD_OP_64(hbr, lduwr);
+PPC_LD_OP_64(wbr, ldlr);
+PPC_ST_OP_64(hbr, stwr);
+PPC_ST_OP_64(wbr, stlr);
#endif
+/* Reverse-endian fixed-point loads and stores with byte-reverse */
PPC_LD_OP(hbr_le, lduw);
PPC_LD_OP(wbr_le, ldl);
PPC_ST_OP(hbr_le, stw);
@@ -202,88 +139,76 @@ PPC_ST_OP_64(hbr_le, stw);
PPC_ST_OP_64(wbr_le, stl);
#endif
-/*** Integer load and store multiple ***/
+/* Native-endian fixed-point loads and stores multiple */
void OPPROTO glue(op_lmw, MEMSUFFIX) (void)
{
+ T0 = (uint32_t)T0;
glue(do_lmw, MEMSUFFIX)(PARAM1);
RETURN();
}
-#if defined(TARGET_PPC64)
-void OPPROTO glue(op_lmw_64, MEMSUFFIX) (void)
-{
- glue(do_lmw_64, MEMSUFFIX)(PARAM1);
- RETURN();
-}
-#endif
-
-void OPPROTO glue(op_lmw_le, MEMSUFFIX) (void)
+void OPPROTO glue(op_stmw, MEMSUFFIX) (void)
{
- glue(do_lmw_le, MEMSUFFIX)(PARAM1);
+ T0 = (uint32_t)T0;
+ glue(do_stmw, MEMSUFFIX)(PARAM1);
RETURN();
}
#if defined(TARGET_PPC64)
-void OPPROTO glue(op_lmw_le_64, MEMSUFFIX) (void)
+void OPPROTO glue(op_lmw_64, MEMSUFFIX) (void)
{
- glue(do_lmw_le_64, MEMSUFFIX)(PARAM1);
+ glue(do_lmw, MEMSUFFIX)(PARAM1);
RETURN();
}
-#endif
-void OPPROTO glue(op_stmw, MEMSUFFIX) (void)
+void OPPROTO glue(op_stmw_64, MEMSUFFIX) (void)
{
glue(do_stmw, MEMSUFFIX)(PARAM1);
RETURN();
}
+#endif
-#if defined(TARGET_PPC64)
-void OPPROTO glue(op_stmw_64, MEMSUFFIX) (void)
+/* Reverse-endian fixed-point loads and stores multiple */
+void OPPROTO glue(op_lmw_le, MEMSUFFIX) (void)
{
- glue(do_stmw_64, MEMSUFFIX)(PARAM1);
+ T0 = (uint32_t)T0;
+ glue(do_lmw_le, MEMSUFFIX)(PARAM1);
RETURN();
}
-#endif
void OPPROTO glue(op_stmw_le, MEMSUFFIX) (void)
{
+ T0 = (uint32_t)T0;
glue(do_stmw_le, MEMSUFFIX)(PARAM1);
RETURN();
}
#if defined(TARGET_PPC64)
-void OPPROTO glue(op_stmw_le_64, MEMSUFFIX) (void)
-{
- glue(do_stmw_le_64, MEMSUFFIX)(PARAM1);
- RETURN();
-}
-#endif
-
-/*** Integer load and store strings ***/
-void OPPROTO glue(op_lswi, MEMSUFFIX) (void)
+void OPPROTO glue(op_lmw_le_64, MEMSUFFIX) (void)
{
- glue(do_lsw, MEMSUFFIX)(PARAM1);
+ glue(do_lmw_le, MEMSUFFIX)(PARAM1);
RETURN();
}
-#if defined(TARGET_PPC64)
-void OPPROTO glue(op_lswi_64, MEMSUFFIX) (void)
+void OPPROTO glue(op_stmw_le_64, MEMSUFFIX) (void)
{
- glue(do_lsw_64, MEMSUFFIX)(PARAM1);
+ glue(do_stmw_le, MEMSUFFIX)(PARAM1);
RETURN();
}
#endif
-void OPPROTO glue(op_lswi_le, MEMSUFFIX) (void)
+/* Native-endian loads and stores string */
+void OPPROTO glue(op_lswi, MEMSUFFIX) (void)
{
- glue(do_lsw_le, MEMSUFFIX)(PARAM1);
+    T0 = (uint32_t)T0;
+    glue(do_lsw, MEMSUFFIX)(PARAM1);
RETURN();
}
#if defined(TARGET_PPC64)
-void OPPROTO glue(op_lswi_le_64, MEMSUFFIX) (void)
+void OPPROTO glue(op_lswi_64, MEMSUFFIX) (void)
{
-    glue(do_lsw_le_64, MEMSUFFIX)(PARAM1);
+ glue(do_lsw, MEMSUFFIX)(PARAM1);
RETURN();
}
#endif
@@ -303,6 +228,7 @@ void OPPROTO glue(op_lswx, MEMSUFFIX) (v
POWERPC_EXCP_INVAL |
POWERPC_EXCP_INVAL_LSWX);
} else {
+ T0 = (uint32_t)T0;
glue(do_lsw, MEMSUFFIX)(PARAM1);
}
}
@@ -320,13 +246,44 @@ void OPPROTO glue(op_lswx_64, MEMSUFFIX)
POWERPC_EXCP_INVAL |
POWERPC_EXCP_INVAL_LSWX);
} else {
- glue(do_lsw_64, MEMSUFFIX)(PARAM1);
+ glue(do_lsw, MEMSUFFIX)(PARAM1);
}
}
RETURN();
}
#endif
+void OPPROTO glue(op_stsw, MEMSUFFIX) (void)
+{
+ T0 = (uint32_t)T0;
+ glue(do_stsw, MEMSUFFIX)(PARAM1);
+ RETURN();
+}
+
+#if defined(TARGET_PPC64)
+void OPPROTO glue(op_stsw_64, MEMSUFFIX) (void)
+{
+ glue(do_stsw, MEMSUFFIX)(PARAM1);
+ RETURN();
+}
+#endif
+
+/* Reverse-endian loads and stores string */
+void OPPROTO glue(op_lswi_le, MEMSUFFIX) (void)
+{
+ T0 = (uint32_t)T0;
+ glue(do_lsw_le, MEMSUFFIX)(PARAM1);
+ RETURN();
+}
+
+#if defined(TARGET_PPC64)
+void OPPROTO glue(op_lswi_le_64, MEMSUFFIX) (void)
+{
+ glue(do_lsw_le, MEMSUFFIX)(PARAM1);
+ RETURN();
+}
+#endif
+
void OPPROTO glue(op_lswx_le, MEMSUFFIX) (void)
{
/* Note: T1 comes from xer_bc then no cast is needed */
@@ -337,6 +294,7 @@ void OPPROTO glue(op_lswx_le, MEMSUFFIX)
POWERPC_EXCP_INVAL |
POWERPC_EXCP_INVAL_LSWX);
} else {
+ T0 = (uint32_t)T0;
glue(do_lsw_le, MEMSUFFIX)(PARAM1);
}
}
@@ -354,29 +312,16 @@ void OPPROTO glue(op_lswx_le_64, MEMSUFF
POWERPC_EXCP_INVAL |
POWERPC_EXCP_INVAL_LSWX);
} else {
- glue(do_lsw_le_64, MEMSUFFIX)(PARAM1);
+ glue(do_lsw_le, MEMSUFFIX)(PARAM1);
}
}
RETURN();
}
#endif
-void OPPROTO glue(op_stsw, MEMSUFFIX) (void)
-{
- glue(do_stsw, MEMSUFFIX)(PARAM1);
- RETURN();
-}
-
-#if defined(TARGET_PPC64)
-void OPPROTO glue(op_stsw_64, MEMSUFFIX) (void)
-{
- glue(do_stsw_64, MEMSUFFIX)(PARAM1);
- RETURN();
-}
-#endif
-
void OPPROTO glue(op_stsw_le, MEMSUFFIX) (void)
{
+ T0 = (uint32_t)T0;
glue(do_stsw_le, MEMSUFFIX)(PARAM1);
RETURN();
}
@@ -384,7 +329,7 @@ void OPPROTO glue(op_stsw_le, MEMSUFFIX)
#if defined(TARGET_PPC64)
void OPPROTO glue(op_stsw_le_64, MEMSUFFIX) (void)
{
- glue(do_stsw_le_64, MEMSUFFIX)(PARAM1);
+ glue(do_stsw_le, MEMSUFFIX)(PARAM1);
RETURN();
}
#endif
@@ -432,38 +377,9 @@ PPC_STF_OP_64(fs, stfs);
PPC_STF_OP_64(fiwx, stfiwx);
#endif
-static always_inline void glue(stfqr, MEMSUFFIX) (target_ulong EA, double d)
-{
- union {
- double d;
- uint64_t u;
- } u;
-
- u.d = d;
- u.u = ((u.u & 0xFF00000000000000ULL) >> 56) |
- ((u.u & 0x00FF000000000000ULL) >> 40) |
- ((u.u & 0x0000FF0000000000ULL) >> 24) |
- ((u.u & 0x000000FF00000000ULL) >> 8) |
- ((u.u & 0x00000000FF000000ULL) << 8) |
- ((u.u & 0x0000000000FF0000ULL) << 24) |
- ((u.u & 0x000000000000FF00ULL) << 40) |
- ((u.u & 0x00000000000000FFULL) << 56);
- glue(stfq, MEMSUFFIX)(EA, u.d);
-}
-
static always_inline void glue(stfsr, MEMSUFFIX) (target_ulong EA, double d)
{
- union {
- float f;
- uint32_t u;
- } u;
-
- u.f = float64_to_float32(d, &env->fp_status);
- u.u = ((u.u & 0xFF000000UL) >> 24) |
- ((u.u & 0x00FF0000ULL) >> 8) |
- ((u.u & 0x0000FF00UL) << 8) |
- ((u.u & 0x000000FFULL) << 24);
- glue(stfl, MEMSUFFIX)(EA, u.f);
+ glue(stflr, MEMSUFFIX)(EA, float64_to_float32(d, &env->fp_status));
}
static always_inline void glue(stfiwxr, MEMSUFFIX) (target_ulong EA, double d)
@@ -475,11 +391,7 @@ static always_inline void glue(stfiwxr,
/* Store the low order 32 bits without any conversion */
u.d = d;
- u.u = ((u.u & 0xFF000000UL) >> 24) |
- ((u.u & 0x00FF0000ULL) >> 8) |
- ((u.u & 0x0000FF00UL) << 8) |
- ((u.u & 0x000000FFULL) << 24);
- glue(stl, MEMSUFFIX)(EA, u.u);
+ glue(stlr, MEMSUFFIX)(EA, u.u);
}
PPC_STF_OP(fd_le, stfqr);
@@ -520,40 +432,9 @@ PPC_LDF_OP_64(fd, ldfq);
PPC_LDF_OP_64(fs, ldfs);
#endif
-static always_inline double glue(ldfqr, MEMSUFFIX) (target_ulong EA)
-{
- union {
- double d;
- uint64_t u;
- } u;
-
- u.d = glue(ldfq, MEMSUFFIX)(EA);
- u.u = ((u.u & 0xFF00000000000000ULL) >> 56) |
- ((u.u & 0x00FF000000000000ULL) >> 40) |
- ((u.u & 0x0000FF0000000000ULL) >> 24) |
- ((u.u & 0x000000FF00000000ULL) >> 8) |
- ((u.u & 0x00000000FF000000ULL) << 8) |
- ((u.u & 0x0000000000FF0000ULL) << 24) |
- ((u.u & 0x000000000000FF00ULL) << 40) |
- ((u.u & 0x00000000000000FFULL) << 56);
-
- return u.d;
-}
-
static always_inline double glue(ldfsr, MEMSUFFIX) (target_ulong EA)
{
- union {
- float f;
- uint32_t u;
- } u;
-
- u.f = glue(ldfl, MEMSUFFIX)(EA);
- u.u = ((u.u & 0xFF000000UL) >> 24) |
- ((u.u & 0x00FF0000ULL) >> 8) |
- ((u.u & 0x0000FF00UL) << 8) |
- ((u.u & 0x000000FFULL) << 24);
-
- return float32_to_float64(u.f, &env->fp_status);
+ return float32_to_float64(glue(ldflr, MEMSUFFIX)(EA), &env->fp_status);
}
PPC_LDF_OP(fd_le, ldfqr);
@@ -615,7 +496,7 @@ void OPPROTO glue(op_lwarx_le, MEMSUFFIX
if (unlikely(T0 & 0x03)) {
do_raise_exception(POWERPC_EXCP_ALIGN);
} else {
- T1 = glue(ld32r, MEMSUFFIX)((uint32_t)T0);
+ T1 = glue(ldlr, MEMSUFFIX)((uint32_t)T0);
env->reserve = (uint32_t)T0;
}
RETURN();
@@ -627,7 +508,7 @@ void OPPROTO glue(op_lwarx_le_64, MEMSUF
if (unlikely(T0 & 0x03)) {
do_raise_exception(POWERPC_EXCP_ALIGN);
} else {
- T1 = glue(ld32r, MEMSUFFIX)((uint64_t)T0);
+ T1 = glue(ldlr, MEMSUFFIX)((uint64_t)T0);
env->reserve = (uint64_t)T0;
}
RETURN();
@@ -638,7 +519,7 @@ void OPPROTO glue(op_ldarx_le, MEMSUFFIX
if (unlikely(T0 & 0x03)) {
do_raise_exception(POWERPC_EXCP_ALIGN);
} else {
- T1 = glue(ld64r, MEMSUFFIX)((uint32_t)T0);
+ T1 = glue(ldqr, MEMSUFFIX)((uint32_t)T0);
env->reserve = (uint32_t)T0;
}
RETURN();
@@ -649,7 +530,7 @@ void OPPROTO glue(op_ldarx_le_64, MEMSUF
if (unlikely(T0 & 0x03)) {
do_raise_exception(POWERPC_EXCP_ALIGN);
} else {
- T1 = glue(ld64r, MEMSUFFIX)((uint64_t)T0);
+ T1 = glue(ldqr, MEMSUFFIX)((uint64_t)T0);
env->reserve = (uint64_t)T0;
}
RETURN();
@@ -731,7 +612,7 @@ void OPPROTO glue(op_stwcx_le, MEMSUFFIX
if (unlikely(env->reserve != (uint32_t)T0)) {
env->crf[0] = xer_so;
} else {
- glue(st32r, MEMSUFFIX)((uint32_t)T0, T1);
+ glue(stlr, MEMSUFFIX)((uint32_t)T0, T1);
env->crf[0] = xer_so | 0x02;
}
}
@@ -748,7 +629,7 @@ void OPPROTO glue(op_stwcx_le_64, MEMSUF
if (unlikely(env->reserve != (uint64_t)T0)) {
env->crf[0] = xer_so;
} else {
- glue(st32r, MEMSUFFIX)((uint64_t)T0, T1);
+ glue(stlr, MEMSUFFIX)((uint64_t)T0, T1);
env->crf[0] = xer_so | 0x02;
}
}
@@ -764,7 +645,7 @@ void OPPROTO glue(op_stdcx_le, MEMSUFFIX
if (unlikely(env->reserve != (uint32_t)T0)) {
env->crf[0] = xer_so;
} else {
- glue(st64r, MEMSUFFIX)((uint32_t)T0, T1);
+ glue(stqr, MEMSUFFIX)((uint32_t)T0, T1);
env->crf[0] = xer_so | 0x02;
}
}
@@ -780,7 +661,7 @@ void OPPROTO glue(op_stdcx_le_64, MEMSUF
if (unlikely(env->reserve != (uint64_t)T0)) {
env->crf[0] = xer_so;
} else {
- glue(st64r, MEMSUFFIX)((uint64_t)T0, T1);
+ glue(stqr, MEMSUFFIX)((uint64_t)T0, T1);
env->crf[0] = xer_so | 0x02;
}
}
@@ -862,6 +743,7 @@ void OPPROTO glue(op_dcbz_l128, MEMSUFFI
void OPPROTO glue(op_dcbz, MEMSUFFIX) (void)
{
+ T0 = (uint32_t)T0;
glue(do_dcbz, MEMSUFFIX)();
RETURN();
}
@@ -940,7 +822,7 @@ void OPPROTO glue(op_dcbz_l128_64, MEMSU
void OPPROTO glue(op_dcbz_64, MEMSUFFIX) (void)
{
- glue(do_dcbz_64, MEMSUFFIX)();
+ glue(do_dcbz, MEMSUFFIX)();
RETURN();
}
#endif
@@ -948,6 +830,7 @@ void OPPROTO glue(op_dcbz_64, MEMSUFFIX)
/* Instruction cache block invalidate */
void OPPROTO glue(op_icbi, MEMSUFFIX) (void)
{
+ T0 = (uint32_t)T0;
glue(do_icbi, MEMSUFFIX)();
RETURN();
}
@@ -955,7 +838,7 @@ void OPPROTO glue(op_icbi, MEMSUFFIX) (v
#if defined(TARGET_PPC64)
void OPPROTO glue(op_icbi_64, MEMSUFFIX) (void)
{
- glue(do_icbi_64, MEMSUFFIX)();
+ glue(do_icbi, MEMSUFFIX)();
RETURN();
}
#endif
@@ -991,28 +874,28 @@ void OPPROTO glue(op_ecowx_64, MEMSUFFIX
void OPPROTO glue(op_eciwx_le, MEMSUFFIX) (void)
{
- T1 = glue(ld32r, MEMSUFFIX)((uint32_t)T0);
+ T1 = glue(ldlr, MEMSUFFIX)((uint32_t)T0);
RETURN();
}
#if defined(TARGET_PPC64)
void OPPROTO glue(op_eciwx_le_64, MEMSUFFIX) (void)
{
- T1 = glue(ld32r, MEMSUFFIX)((uint64_t)T0);
+ T1 = glue(ldlr, MEMSUFFIX)((uint64_t)T0);
RETURN();
}
#endif
void OPPROTO glue(op_ecowx_le, MEMSUFFIX) (void)
{
- glue(st32r, MEMSUFFIX)((uint32_t)T0, T1);
+ glue(stlr, MEMSUFFIX)((uint32_t)T0, T1);
RETURN();
}
#if defined(TARGET_PPC64)
void OPPROTO glue(op_ecowx_le_64, MEMSUFFIX) (void)
{
- glue(st32r, MEMSUFFIX)((uint64_t)T0, T1);
+ glue(stlr, MEMSUFFIX)((uint64_t)T0, T1);
RETURN();
}
#endif
@@ -1070,8 +953,8 @@ void OPPROTO glue(op_vr_lvx, MEMSUFFIX)
void OPPROTO glue(op_vr_lvx_le, MEMSUFFIX) (void)
{
- AVR0.u64[VR_DWORD1] = glue(ldq, MEMSUFFIX)((uint32_t)T0);
- AVR0.u64[VR_DWORD0] = glue(ldq, MEMSUFFIX)((uint32_t)T0 + 8);
+ AVR0.u64[VR_DWORD1] = glue(ldqr, MEMSUFFIX)((uint32_t)T0);
+ AVR0.u64[VR_DWORD0] = glue(ldqr, MEMSUFFIX)((uint32_t)T0 + 8);
}
void OPPROTO glue(op_vr_stvx, MEMSUFFIX) (void)
@@ -1082,8 +965,8 @@ void OPPROTO glue(op_vr_stvx, MEMSUFFIX)
void OPPROTO glue(op_vr_stvx_le, MEMSUFFIX) (void)
{
- glue(stq, MEMSUFFIX)((uint32_t)T0, AVR0.u64[VR_DWORD1]);
- glue(stq, MEMSUFFIX)((uint32_t)T0 + 8, AVR0.u64[VR_DWORD0]);
+ glue(stqr, MEMSUFFIX)((uint32_t)T0, AVR0.u64[VR_DWORD1]);
+ glue(stqr, MEMSUFFIX)((uint32_t)T0 + 8, AVR0.u64[VR_DWORD0]);
}
#if defined(TARGET_PPC64)
@@ -1095,8 +978,8 @@ void OPPROTO glue(op_vr_lvx_64, MEMSUFFI
void OPPROTO glue(op_vr_lvx_le_64, MEMSUFFIX) (void)
{
- AVR0.u64[VR_DWORD1] = glue(ldq, MEMSUFFIX)((uint64_t)T0);
- AVR0.u64[VR_DWORD0] = glue(ldq, MEMSUFFIX)((uint64_t)T0 + 8);
+ AVR0.u64[VR_DWORD1] = glue(ldqr, MEMSUFFIX)((uint64_t)T0);
+ AVR0.u64[VR_DWORD0] = glue(ldqr, MEMSUFFIX)((uint64_t)T0 + 8);
}
void OPPROTO glue(op_vr_stvx_64, MEMSUFFIX) (void)
@@ -1107,8 +990,8 @@ void OPPROTO glue(op_vr_stvx_64, MEMSUFF
void OPPROTO glue(op_vr_stvx_le_64, MEMSUFFIX) (void)
{
- glue(stq, MEMSUFFIX)((uint64_t)T0, AVR0.u64[VR_DWORD1]);
- glue(stq, MEMSUFFIX)((uint64_t)T0 + 8, AVR0.u64[VR_DWORD0]);
+ glue(stqr, MEMSUFFIX)((uint64_t)T0, AVR0.u64[VR_DWORD1]);
+ glue(stqr, MEMSUFFIX)((uint64_t)T0 + 8, AVR0.u64[VR_DWORD0]);
}
#endif
#undef VR_DWORD0
@@ -1163,8 +1046,8 @@ _PPC_SPE_ST_OP(name, op)
#if !defined(TARGET_PPC64)
PPC_SPE_LD_OP(dd, ldq);
PPC_SPE_ST_OP(dd, stq);
-PPC_SPE_LD_OP(dd_le, ld64r);
-PPC_SPE_ST_OP(dd_le, st64r);
+PPC_SPE_LD_OP(dd_le, ldqr);
+PPC_SPE_ST_OP(dd_le, stqr);
#endif
static always_inline uint64_t glue(spe_ldw, MEMSUFFIX) (target_ulong EA)
{
@@ -1184,16 +1067,16 @@ PPC_SPE_ST_OP(dw, spe_stdw);
static always_inline uint64_t glue(spe_ldw_le, MEMSUFFIX) (target_ulong EA)
{
uint64_t ret;
- ret = (uint64_t)glue(ld32r, MEMSUFFIX)(EA) << 32;
- ret |= (uint64_t)glue(ld32r, MEMSUFFIX)(EA + 4);
+ ret = (uint64_t)glue(ldlr, MEMSUFFIX)(EA) << 32;
+ ret |= (uint64_t)glue(ldlr, MEMSUFFIX)(EA + 4);
return ret;
}
PPC_SPE_LD_OP(dw_le, spe_ldw_le);
static always_inline void glue(spe_stdw_le, MEMSUFFIX) (target_ulong EA,
uint64_t data)
{
- glue(st32r, MEMSUFFIX)(EA, data >> 32);
- glue(st32r, MEMSUFFIX)(EA + 4, data);
+ glue(stlr, MEMSUFFIX)(EA, data >> 32);
+ glue(stlr, MEMSUFFIX)(EA + 4, data);
}
PPC_SPE_ST_OP(dw_le, spe_stdw_le);
static always_inline uint64_t glue(spe_ldh, MEMSUFFIX) (target_ulong EA)
@@ -1218,20 +1101,20 @@ PPC_SPE_ST_OP(dh, spe_stdh);
static always_inline uint64_t glue(spe_ldh_le, MEMSUFFIX) (target_ulong EA)
{
uint64_t ret;
- ret = (uint64_t)glue(ld16r, MEMSUFFIX)(EA) << 48;
- ret |= (uint64_t)glue(ld16r, MEMSUFFIX)(EA + 2) << 32;
- ret |= (uint64_t)glue(ld16r, MEMSUFFIX)(EA + 4) << 16;
- ret |= (uint64_t)glue(ld16r, MEMSUFFIX)(EA + 6);
+ ret = (uint64_t)glue(lduwr, MEMSUFFIX)(EA) << 48;
+ ret |= (uint64_t)glue(lduwr, MEMSUFFIX)(EA + 2) << 32;
+ ret |= (uint64_t)glue(lduwr, MEMSUFFIX)(EA + 4) << 16;
+ ret |= (uint64_t)glue(lduwr, MEMSUFFIX)(EA + 6);
return ret;
}
PPC_SPE_LD_OP(dh_le, spe_ldh_le);
static always_inline void glue(spe_stdh_le, MEMSUFFIX) (target_ulong EA,
uint64_t data)
{
- glue(st16r, MEMSUFFIX)(EA, data >> 48);
- glue(st16r, MEMSUFFIX)(EA + 2, data >> 32);
- glue(st16r, MEMSUFFIX)(EA + 4, data >> 16);
- glue(st16r, MEMSUFFIX)(EA + 6, data);
+ glue(stwr, MEMSUFFIX)(EA, data >> 48);
+ glue(stwr, MEMSUFFIX)(EA + 2, data >> 32);
+ glue(stwr, MEMSUFFIX)(EA + 4, data >> 16);
+ glue(stwr, MEMSUFFIX)(EA + 6, data);
}
PPC_SPE_ST_OP(dh_le, spe_stdh_le);
static always_inline uint64_t glue(spe_lwhe, MEMSUFFIX) (target_ulong EA)
@@ -1252,16 +1135,16 @@ PPC_SPE_ST_OP(whe, spe_stwhe);
static always_inline uint64_t glue(spe_lwhe_le, MEMSUFFIX) (target_ulong EA)
{
uint64_t ret;
- ret = (uint64_t)glue(ld16r, MEMSUFFIX)(EA) << 48;
- ret |= (uint64_t)glue(ld16r, MEMSUFFIX)(EA + 2) << 16;
+ ret = (uint64_t)glue(lduwr, MEMSUFFIX)(EA) << 48;
+ ret |= (uint64_t)glue(lduwr, MEMSUFFIX)(EA + 2) << 16;
return ret;
}
PPC_SPE_LD_OP(whe_le, spe_lwhe_le);
static always_inline void glue(spe_stwhe_le, MEMSUFFIX) (target_ulong EA,
uint64_t data)
{
- glue(st16r, MEMSUFFIX)(EA, data >> 48);
- glue(st16r, MEMSUFFIX)(EA + 2, data >> 16);
+ glue(stwr, MEMSUFFIX)(EA, data >> 48);
+ glue(stwr, MEMSUFFIX)(EA + 2, data >> 16);
}
PPC_SPE_ST_OP(whe_le, spe_stwhe_le);
static always_inline uint64_t glue(spe_lwhou, MEMSUFFIX) (target_ulong EA)
@@ -1290,24 +1173,24 @@ PPC_SPE_ST_OP(who, spe_stwho);
static always_inline uint64_t glue(spe_lwhou_le, MEMSUFFIX) (target_ulong EA)
{
uint64_t ret;
- ret = (uint64_t)glue(ld16r, MEMSUFFIX)(EA) << 32;
- ret |= (uint64_t)glue(ld16r, MEMSUFFIX)(EA + 2);
+ ret = (uint64_t)glue(lduwr, MEMSUFFIX)(EA) << 32;
+ ret |= (uint64_t)glue(lduwr, MEMSUFFIX)(EA + 2);
return ret;
}
PPC_SPE_LD_OP(whou_le, spe_lwhou_le);
static always_inline uint64_t glue(spe_lwhos_le, MEMSUFFIX) (target_ulong EA)
{
uint64_t ret;
- ret = ((uint64_t)((int32_t)glue(ld16rs, MEMSUFFIX)(EA))) << 32;
- ret |= (uint64_t)((int32_t)glue(ld16rs, MEMSUFFIX)(EA + 2));
+ ret = ((uint64_t)((int32_t)glue(ldswr, MEMSUFFIX)(EA))) << 32;
+ ret |= (uint64_t)((int32_t)glue(ldswr, MEMSUFFIX)(EA + 2));
return ret;
}
PPC_SPE_LD_OP(whos_le, spe_lwhos_le);
static always_inline void glue(spe_stwho_le, MEMSUFFIX) (target_ulong EA,
uint64_t data)
{
- glue(st16r, MEMSUFFIX)(EA, data >> 32);
- glue(st16r, MEMSUFFIX)(EA + 2, data);
+ glue(stwr, MEMSUFFIX)(EA, data >> 32);
+ glue(stwr, MEMSUFFIX)(EA + 2, data);
}
PPC_SPE_ST_OP(who_le, spe_stwho_le);
#if !defined(TARGET_PPC64)
@@ -1320,7 +1203,7 @@ PPC_SPE_ST_OP(wwo, spe_stwwo);
static always_inline void glue(spe_stwwo_le, MEMSUFFIX) (target_ulong EA,
uint64_t data)
{
- glue(st32r, MEMSUFFIX)(EA, data);
+ glue(stlr, MEMSUFFIX)(EA, data);
}
PPC_SPE_ST_OP(wwo_le, spe_stwwo_le);
#endif
@@ -1334,7 +1217,7 @@ PPC_SPE_LD_OP(h, spe_lh);
static always_inline uint64_t glue(spe_lh_le, MEMSUFFIX) (target_ulong EA)
{
uint16_t tmp;
- tmp = glue(ld16r, MEMSUFFIX)(EA);
+ tmp = glue(lduwr, MEMSUFFIX)(EA);
return ((uint64_t)tmp << 48) | ((uint64_t)tmp << 16);
}
PPC_SPE_LD_OP(h_le, spe_lh_le);
@@ -1349,7 +1232,7 @@ static always_inline
uint64_t glue(spe_lwwsplat_le, MEMSUFFIX) (target_ulong EA)
{
uint32_t tmp;
- tmp = glue(ld32r, MEMSUFFIX)(EA);
+ tmp = glue(ldlr, MEMSUFFIX)(EA);
return ((uint64_t)tmp << 32) | (uint64_t)tmp;
}
PPC_SPE_LD_OP(wwsplat_le, spe_lwwsplat_le);
@@ -1369,9 +1252,9 @@ uint64_t glue(spe_lwhsplat_le, MEMSUFFIX
{
uint64_t ret;
uint16_t tmp;
- tmp = glue(ld16r, MEMSUFFIX)(EA);
+ tmp = glue(lduwr, MEMSUFFIX)(EA);
ret = ((uint64_t)tmp << 48) | ((uint64_t)tmp << 32);
- tmp = glue(ld16r, MEMSUFFIX)(EA + 2);
+ tmp = glue(lduwr, MEMSUFFIX)(EA + 2);
ret |= ((uint64_t)tmp << 16) | (uint64_t)tmp;
return ret;
}
Index: target-sh4/exec.h
===================================================================
RCS file: /sources/qemu/qemu/target-sh4/exec.h,v
retrieving revision 1.5
diff -u -d -d -p -r1.5 exec.h
--- target-sh4/exec.h 16 Sep 2007 21:08:05 -0000 1.5
+++ target-sh4/exec.h 12 Oct 2007 07:14:49 -0000
@@ -48,6 +48,9 @@ static inline int cpu_halted(CPUState *e
#ifndef CONFIG_USER_ONLY
#include "softmmu_exec.h"
+#define REVERSE_ENDIAN
+#include "softmmu_exec.h"
+#undef REVERSE_ENDIAN
#endif
#define RETURN() __asm__ __volatile__("")
Index: target-sh4/op_helper.c
===================================================================
RCS file: /sources/qemu/qemu/target-sh4/op_helper.c,v
retrieving revision 1.4
diff -u -d -d -p -r1.4 op_helper.c
--- target-sh4/op_helper.c 16 Sep 2007 21:08:05 -0000 1.4
+++ target-sh4/op_helper.c 12 Oct 2007 07:14:50 -0000
@@ -30,6 +30,7 @@ void do_raise_exception(void)
#define MMUSUFFIX _mmu
#define GETPC() (__builtin_return_address(0))
+/* Native-endian */
#define SHIFT 0
#include "softmmu_template.h"
@@ -42,6 +43,21 @@ void do_raise_exception(void)
#define SHIFT 3
#include "softmmu_template.h"
+/* Reverse-endian */
+#define REVERSE_ENDIAN
+#define SHIFT 0
+#include "softmmu_template.h"
+
+#define SHIFT 1
+#include "softmmu_template.h"
+
+#define SHIFT 2
+#include "softmmu_template.h"
+
+#define SHIFT 3
+#include "softmmu_template.h"
+#undef REVERSE_ENDIAN
+
void tlb_fill(target_ulong addr, int is_write, int is_user, void *retaddr)
{
TranslationBlock *tb;
Index: target-sparc/exec.h
===================================================================
RCS file: /sources/qemu/qemu/target-sparc/exec.h,v
retrieving revision 1.21
diff -u -d -d -p -r1.21 exec.h
--- target-sparc/exec.h 30 Sep 2007 19:38:11 -0000 1.21
+++ target-sparc/exec.h 12 Oct 2007 07:14:50 -0000
@@ -100,6 +100,9 @@ void do_rdpsr();
/* XXX: move that to a generic header */
#if !defined(CONFIG_USER_ONLY)
#include "softmmu_exec.h"
+#define REVERSE_ENDIAN
+#include "softmmu_exec.h"
+#undef REVERSE_ENDIAN
#endif /* !defined(CONFIG_USER_ONLY) */
static inline void env_to_regs(void)
Index: target-sparc/op_helper.c
===================================================================
RCS file: /sources/qemu/qemu/target-sparc/op_helper.c,v
retrieving revision 1.41
diff -u -d -d -p -r1.41 op_helper.c
--- target-sparc/op_helper.c 1 Oct 2007 17:07:58 -0000 1.41
+++ target-sparc/op_helper.c 12 Oct 2007 07:14:50 -0000
@@ -1497,6 +1497,21 @@ static void do_unaligned_access(target_u
#define ALIGNED_ONLY
#define GETPC() (__builtin_return_address(0))
+/* Native-endian */
+#define SHIFT 0
+#include "softmmu_template.h"
+
+#define SHIFT 1
+#include "softmmu_template.h"
+
+#define SHIFT 2
+#include "softmmu_template.h"
+
+#define SHIFT 3
+#include "softmmu_template.h"
+
+/* Reverse-endian */
+#define REVERSE_ENDIAN
#define SHIFT 0
#include "softmmu_template.h"
@@ -1508,6 +1523,7 @@ static void do_unaligned_access(target_u
#define SHIFT 3
#include "softmmu_template.h"
+#undef REVERSE_ENDIAN
static void do_unaligned_access(target_ulong addr, int is_write, int is_user,
void *retaddr)
next reply other threads:[~2007-10-13 9:56 UTC|newest]
Thread overview: 20+ messages / expand[flat|nested] mbox.gz Atom feed top
2007-10-13 9:56 J. Mayer [this message]
2007-10-13 10:47 ` [Qemu-devel] RFC: reverse-endian softmmu memory accessors Blue Swirl
2007-10-13 12:43 ` J. Mayer
2007-10-13 13:07 ` Blue Swirl
2007-10-13 14:17 ` J. Mayer
2007-10-13 22:07 ` J. Mayer
2007-10-13 22:53 ` Thiemo Seufer
2007-10-14 8:19 ` Blue Swirl
2007-10-14 10:14 ` J. Mayer
2007-10-14 13:22 ` Thiemo Seufer
2007-10-15 11:55 ` J. Mayer
2007-10-13 13:02 ` Thiemo Seufer
-- strict thread matches above, loose matches on Subject: below --
2007-10-14 11:49 J. Mayer
2007-10-14 12:59 ` Blue Swirl
2007-10-15 12:10 ` J. Mayer
2007-10-15 16:02 ` Blue Swirl
2007-10-15 17:45 ` Blue Swirl
2007-10-16 20:27 ` J. Mayer
2007-11-23 12:55 ` Tero Kaarlela
2007-10-15 21:06 ` J. Mayer
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1192269372.9976.305.camel@rapid \
--to=l_indien@magic.fr \
--cc=qemu-devel@nongnu.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).