* [Qemu-devel] [PATCH 0/3] target-mips: Add support for misaligned accesses
From: Yongbok Kim
Date: 2015-05-01 15:24 UTC
To: qemu-devel; +Cc: Leon.Alrae

This patch set adds support for misaligned memory accesses in MIPS
Architecture Release 6 and the MIPS SIMD Architecture.

The behaviour, semantics, and architecture specifications of misaligned
memory accesses are described in:
MIPS Architecture For Programmers Volume I-A: Introduction to the MIPS64
Architecture, Appendix B: Misaligned Memory Accesses.
Available at http://www.imgtec.com/mips/architectures/mips64.asp

Regards,
Yongbok

Yongbok Kim (3):
  softmmu: Add size argument to do_unaligned_access()
  target-mips: Misaligned Memory Accesses for R6
  target-mips: Misaligned Memory Accesses for MSA

 include/qom/cpu.h            |    7 +++--
 softmmu_template.h           |   24 +++++++++++-----------
 target-alpha/cpu-qom.h       |    3 +-
 target-alpha/mem_helper.c    |    3 +-
 target-mips/cpu-qom.h        |    3 +-
 target-mips/cpu.h            |    2 +
 target-mips/helper.c         |   36 +++++++++++++++++++++++++++++++++
 target-mips/op_helper.c      |   45 +++++++++++++++++++++++++++++++++++++++++-
 target-mips/translate_init.c |    2 +-
 target-sparc/cpu-qom.h       |    3 +-
 target-sparc/ldst_helper.c   |    3 +-
 target-xtensa/cpu-qom.h      |    3 +-
 target-xtensa/op_helper.c    |    2 +-
 13 files changed, 112 insertions(+), 24 deletions(-)

--
1.7.5.4
* [Qemu-devel] [PATCH 1/3] softmmu: Add size argument to do_unaligned_access()
From: Yongbok Kim
Date: 2015-05-01 15:24 UTC
To: qemu-devel; +Cc: Leon.Alrae

Pass a data size argument to do_unaligned_access(). Without the data size,
the callback cannot determine whether an access spans two pages.

Signed-off-by: Yongbok Kim <yongbok.kim@imgtec.com>
---
 include/qom/cpu.h          |    7 ++++---
 softmmu_template.h         |   24 ++++++++++++------------
 target-alpha/cpu-qom.h     |    3 ++-
 target-alpha/mem_helper.c  |    3 ++-
 target-mips/cpu-qom.h      |    3 ++-
 target-mips/op_helper.c    |    2 +-
 target-sparc/cpu-qom.h     |    3 ++-
 target-sparc/ldst_helper.c |    3 ++-
 target-xtensa/cpu-qom.h    |    3 ++-
 target-xtensa/op_helper.c  |    2 +-
 10 files changed, 30 insertions(+), 23 deletions(-)

diff --git a/include/qom/cpu.h b/include/qom/cpu.h
index 39f0f19..6ba2dad 100644
--- a/include/qom/cpu.h
+++ b/include/qom/cpu.h
@@ -134,7 +134,8 @@ typedef struct CPUClass {
     void (*do_interrupt)(CPUState *cpu);
     CPUUnassignedAccess do_unassigned_access;
     void (*do_unaligned_access)(CPUState *cpu, vaddr addr,
-                                int is_write, int is_user, uintptr_t retaddr);
+                                int is_write, int is_user, uintptr_t retaddr,
+                                unsigned size);
     bool (*virtio_is_big_endian)(CPUState *cpu);
     int (*memory_rw_debug)(CPUState *cpu, vaddr addr,
                            uint8_t *buf, int len, bool is_write);
@@ -591,11 +592,11 @@ static inline void cpu_unassigned_access(CPUState *cpu, hwaddr addr,
 
 static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
                                         int is_write, int is_user,
-                                        uintptr_t retaddr)
+                                        uintptr_t retaddr, unsigned size)
 {
     CPUClass *cc = CPU_GET_CLASS(cpu);
 
-    cc->do_unaligned_access(cpu, addr, is_write, is_user, retaddr);
+    cc->do_unaligned_access(cpu, addr, is_write, is_user, retaddr, size);
 }
 #endif
 
diff --git a/softmmu_template.h b/softmmu_template.h
index 16b0852..d896b9c 100644
--- a/softmmu_template.h
+++ b/softmmu_template.h
@@ -185,7 +185,7 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
 #ifdef ALIGNED_ONLY
         if ((addr & (DATA_SIZE - 1)) != 0) {
             cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
-                                 mmu_idx, retaddr);
+                                 mmu_idx, retaddr, DATA_SIZE);
         }
 #endif
         if (!VICTIM_TLB_HIT(ADDR_READ)) {
@@ -220,7 +220,7 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
     do_unaligned_access:
 #ifdef ALIGNED_ONLY
         cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
-                             mmu_idx, retaddr);
+                             mmu_idx, retaddr, DATA_SIZE);
 #endif
         addr1 = addr & ~(DATA_SIZE - 1);
         addr2 = addr1 + DATA_SIZE;
@@ -239,7 +239,7 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
 #ifdef ALIGNED_ONLY
     if ((addr & (DATA_SIZE - 1)) != 0) {
         cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
-                             mmu_idx, retaddr);
+                             mmu_idx, retaddr, DATA_SIZE);
     }
 #endif
 
@@ -273,7 +273,7 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
 #ifdef ALIGNED_ONLY
         if ((addr & (DATA_SIZE - 1)) != 0) {
             cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
-                                 mmu_idx, retaddr);
+                                 mmu_idx, retaddr, DATA_SIZE);
         }
 #endif
         if (!VICTIM_TLB_HIT(ADDR_READ)) {
@@ -308,7 +308,7 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
     do_unaligned_access:
 #ifdef ALIGNED_ONLY
         cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
-                             mmu_idx, retaddr);
+                             mmu_idx, retaddr, DATA_SIZE);
 #endif
         addr1 = addr & ~(DATA_SIZE - 1);
         addr2 = addr1 + DATA_SIZE;
@@ -327,7 +327,7 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
 #ifdef ALIGNED_ONLY
     if ((addr & (DATA_SIZE - 1)) != 0) {
         cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
-                             mmu_idx, retaddr);
+                             mmu_idx, retaddr, DATA_SIZE);
     }
 #endif
 
@@ -401,7 +401,7 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
 #ifdef ALIGNED_ONLY
         if ((addr & (DATA_SIZE - 1)) != 0) {
             cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
-                                 mmu_idx, retaddr);
+                                 mmu_idx, retaddr, DATA_SIZE);
         }
 #endif
         if (!VICTIM_TLB_HIT(addr_write)) {
@@ -433,7 +433,7 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     do_unaligned_access:
 #ifdef ALIGNED_ONLY
         cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
-                             mmu_idx, retaddr);
+                             mmu_idx, retaddr, DATA_SIZE);
 #endif
         /* XXX: not efficient, but simple */
         /* Note: relies on the fact that tlb_fill() does not remove the
@@ -453,7 +453,7 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
 #ifdef ALIGNED_ONLY
     if ((addr & (DATA_SIZE - 1)) != 0) {
         cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
-                             mmu_idx, retaddr);
+                             mmu_idx, retaddr, DATA_SIZE);
     }
 #endif
 
@@ -482,7 +482,7 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
 #ifdef ALIGNED_ONLY
         if ((addr & (DATA_SIZE - 1)) != 0) {
             cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
-                                 mmu_idx, retaddr);
+                                 mmu_idx, retaddr, DATA_SIZE);
         }
 #endif
         if (!VICTIM_TLB_HIT(addr_write)) {
@@ -514,7 +514,7 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     do_unaligned_access:
 #ifdef ALIGNED_ONLY
         cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
-                             mmu_idx, retaddr);
+                             mmu_idx, retaddr, DATA_SIZE);
 #endif
         /* XXX: not efficient, but simple */
         /* Note: relies on the fact that tlb_fill() does not remove the
@@ -534,7 +534,7 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
 #ifdef ALIGNED_ONLY
     if ((addr & (DATA_SIZE - 1)) != 0) {
         cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
-                             mmu_idx, retaddr);
+                             mmu_idx, retaddr, DATA_SIZE);
     }
 #endif
 
diff --git a/target-alpha/cpu-qom.h b/target-alpha/cpu-qom.h
index b01c6c8..273a8ed 100644
--- a/target-alpha/cpu-qom.h
+++ b/target-alpha/cpu-qom.h
@@ -86,6 +86,7 @@ hwaddr alpha_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
 int alpha_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
 int alpha_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
 void alpha_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
-                                   int is_write, int is_user, uintptr_t retaddr);
+                                   int is_write, int is_user, uintptr_t retaddr,
+                                   unsigned size);
 
 #endif
diff --git a/target-alpha/mem_helper.c b/target-alpha/mem_helper.c
index fc4f57a..1d4666a 100644
--- a/target-alpha/mem_helper.c
+++ b/target-alpha/mem_helper.c
@@ -97,7 +97,8 @@ uint64_t helper_stq_c_phys(CPUAlphaState *env, uint64_t p, uint64_t v)
 }
 
 void alpha_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
-                                   int is_write, int is_user, uintptr_t retaddr)
+                                   int is_write, int is_user, uintptr_t retaddr,
+                                   unsigned size)
 {
     AlphaCPU *cpu = ALPHA_CPU(cs);
     CPUAlphaState *env = &cpu->env;
diff --git a/target-mips/cpu-qom.h b/target-mips/cpu-qom.h
index 4d6f9de..6d54314 100644
--- a/target-mips/cpu-qom.h
+++ b/target-mips/cpu-qom.h
@@ -86,6 +86,7 @@ hwaddr mips_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
 int mips_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
 int mips_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
 void mips_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
-                                  int is_write, int is_user, uintptr_t retaddr);
+                                  int is_write, int is_user, uintptr_t retaddr,
+                                  unsigned size);
 
 #endif
diff --git a/target-mips/op_helper.c b/target-mips/op_helper.c
index 73a8e45..ca5fe43 100644
--- a/target-mips/op_helper.c
+++ b/target-mips/op_helper.c
@@ -2208,7 +2208,7 @@ void helper_wait(CPUMIPSState *env)
 
 void mips_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                   int access_type, int is_user,
-                                  uintptr_t retaddr)
+                                  uintptr_t retaddr, unsigned size)
 {
     MIPSCPU *cpu = MIPS_CPU(cs);
     CPUMIPSState *env = &cpu->env;
diff --git a/target-sparc/cpu-qom.h b/target-sparc/cpu-qom.h
index 477c4d5..4bfbb84 100644
--- a/target-sparc/cpu-qom.h
+++ b/target-sparc/cpu-qom.h
@@ -83,6 +83,7 @@ int sparc_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
 int sparc_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
 void QEMU_NORETURN sparc_cpu_do_unaligned_access(CPUState *cpu,
                                                  vaddr addr, int is_write,
-                                                 int is_user, uintptr_t retaddr);
+                                                 int is_user, uintptr_t retaddr,
+                                                 unsigned size);
 
 #endif
diff --git a/target-sparc/ldst_helper.c b/target-sparc/ldst_helper.c
index c7ad47d..7837d64 100644
--- a/target-sparc/ldst_helper.c
+++ b/target-sparc/ldst_helper.c
@@ -2420,7 +2420,8 @@ void sparc_cpu_unassigned_access(CPUState *cs, hwaddr addr,
 #if !defined(CONFIG_USER_ONLY)
 void QEMU_NORETURN sparc_cpu_do_unaligned_access(CPUState *cs,
                                                  vaddr addr, int is_write,
-                                                 int is_user, uintptr_t retaddr)
+                                                 int is_user, uintptr_t retaddr,
+                                                 unsigned size)
 {
     SPARCCPU *cpu = SPARC_CPU(cs);
     CPUSPARCState *env = &cpu->env;
diff --git a/target-xtensa/cpu-qom.h b/target-xtensa/cpu-qom.h
index 2258224..2508228 100644
--- a/target-xtensa/cpu-qom.h
+++ b/target-xtensa/cpu-qom.h
@@ -94,6 +94,7 @@ hwaddr xtensa_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
 int xtensa_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
 int xtensa_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
 void xtensa_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
-                                    int is_write, int is_user, uintptr_t retaddr);
+                                    int is_write, int is_user,
+                                    uintptr_t retaddr, unsigned size);
 
 #endif
diff --git a/target-xtensa/op_helper.c b/target-xtensa/op_helper.c
index be657e6..0804791 100644
--- a/target-xtensa/op_helper.c
+++ b/target-xtensa/op_helper.c
@@ -33,7 +33,7 @@
 #include "qemu/timer.h"
 
 void xtensa_cpu_do_unaligned_access(CPUState *cs,
-        vaddr addr, int is_write, int is_user, uintptr_t retaddr)
+        vaddr addr, int is_write, int is_user, uintptr_t retaddr, unsigned size)
 {
     XtensaCPU *cpu = XTENSA_CPU(cs);
     CPUXtensaState *env = &cpu->env;
--
1.7.5.4
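To see what the new size argument makes possible, here is a small standalone
sketch (not part of the patch; a 4 KiB target page size is assumed) of the
page-span test a do_unaligned_access() implementation can now perform:

#include <stdio.h>
#include <stdint.h>

/* Standalone sketch: with the access size available, an unaligned-access
 * hook can tell whether the access stays within one page or spans two.
 * A 4 KiB target page size is assumed. */
#define PAGE_SIZE 0x1000u

static int spans_two_pages(uint64_t addr, unsigned size)
{
    return size > 1 && ((addr & (PAGE_SIZE - 1)) + size - 1) >= PAGE_SIZE;
}

int main(void)
{
    /* A 4-byte load at 0xffe crosses into the next page (0xffe + 3 = 0x1001);
     * the same load at 0xff1 is misaligned but stays within one page. */
    printf("0xffe, 4 bytes: %d\n", spans_two_pages(0xffe, 4));  /* 1 */
    printf("0xff1, 4 bytes: %d\n", spans_two_pages(0xff1, 4));  /* 0 */
    return 0;
}

Without the size, both cases above look identical to the callback: it only
sees a misaligned address.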
* [Qemu-devel] [PATCH 2/3] target-mips: Misaligned Memory Accesses for R6
From: Yongbok Kim
Date: 2015-05-01 15:24 UTC
To: qemu-devel; +Cc: Leon.Alrae

Release 6 requires misaligned memory access support for all ordinary memory
access instructions (for example, LW/SW, LWC1/SWC1).
However, misaligned support is not provided for certain special memory
accesses such as atomics (for example, LL/SC).

In the mips_cpu_do_unaligned_access() callback, if it is an R6 core, it
further checks whether the address is valid.

Signed-off-by: Yongbok Kim <yongbok.kim@imgtec.com>
---
 target-mips/cpu.h            |    2 ++
 target-mips/helper.c         |   36 ++++++++++++++++++++++++++++++++++++
 target-mips/op_helper.c      |   13 +++++++++++++
 target-mips/translate_init.c |    2 +-
 4 files changed, 52 insertions(+), 1 deletions(-)

diff --git a/target-mips/cpu.h b/target-mips/cpu.h
index f9d2b4c..6586d89 100644
--- a/target-mips/cpu.h
+++ b/target-mips/cpu.h
@@ -760,6 +760,8 @@ int mips_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
 void r4k_invalidate_tlb (CPUMIPSState *env, int idx, int use_extra);
 hwaddr cpu_mips_translate_address (CPUMIPSState *env, target_ulong address,
                                    int rw);
+bool cpu_mips_validate_access(CPUMIPSState *env, target_ulong address,
+        target_ulong badvaddr, unsigned data_size, int rw);
 #endif
 target_ulong exception_resume_pc (CPUMIPSState *env);
 
diff --git a/target-mips/helper.c b/target-mips/helper.c
index 8e3204a..6c44124 100644
--- a/target-mips/helper.c
+++ b/target-mips/helper.c
@@ -391,6 +391,42 @@ hwaddr cpu_mips_translate_address(CPUMIPSState *env, target_ulong address, int r
     }
 }
 
+bool cpu_mips_validate_access(CPUMIPSState *env, target_ulong address,
+        target_ulong badvaddr, unsigned data_size, int rw)
+{
+    hwaddr physical;
+    int prot;
+    int access_type = ACCESS_INT;
+    int ret;
+    target_ulong addr;
+
+    addr = address & ~((target_ulong) data_size - 1);
+    ret = get_physical_address(env, &physical, &prot,
+                               addr, rw, access_type);
+    if (ret != TLBRET_MATCH) {
+        if (ret != TLBRET_BADADDR && addr > badvaddr) {
+            badvaddr = addr;
+        }
+        raise_mmu_exception(env, badvaddr, rw, ret);
+        return false;
+    }
+    if ((data_size > 1)
+        && unlikely(((address & ~TARGET_PAGE_MASK) + data_size - 1)
+                    >= TARGET_PAGE_SIZE)) {
+        addr += data_size;
+        ret = get_physical_address(env, &physical, &prot, addr, rw,
+                                   access_type);
+        if (ret != TLBRET_MATCH) {
+            if (ret != TLBRET_BADADDR) {
+                badvaddr = addr;
+            }
+            raise_mmu_exception(env, badvaddr, rw, ret);
+            return false;
+        }
+    }
+    return true;
+}
+
 static const char * const excp_names[EXCP_LAST + 1] = {
     [EXCP_RESET] = "reset",
     [EXCP_SRESET] = "soft reset",
diff --git a/target-mips/op_helper.c b/target-mips/op_helper.c
index ca5fe43..dacc92b 100644
--- a/target-mips/op_helper.c
+++ b/target-mips/op_helper.c
@@ -2215,6 +2215,19 @@ void mips_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
     int error_code = 0;
     int excp;
 
+    if (env->insn_flags & ISA_MIPS32R6) {
+        /* Release 6 provides support for misaligned memory access for
+         * all ordinary memory reference instructions
+         * */
+        if (!cpu_mips_validate_access(env, addr, addr, size, access_type)) {
+            CPUState *cs = CPU(mips_env_get_cpu(env));
+            do_raise_exception_err(env, cs->exception_index,
+                                   env->error_code, retaddr);
+            return;
+        }
+        return;
+    }
+
     env->CP0_BadVAddr = addr;
 
     if (access_type == MMU_DATA_STORE) {
diff --git a/target-mips/translate_init.c b/target-mips/translate_init.c
index 85a65e7..ec54fef 100644
--- a/target-mips/translate_init.c
+++ b/target-mips/translate_init.c
@@ -607,7 +607,7 @@ static const mips_def_t mips_defs[] =
     },
     {
         /* A generic CPU supporting MIPS64 Release 6 ISA.
-           FIXME: Support IEEE 754-2008 FP and misaligned memory accesses.
+           FIXME: Support IEEE 754-2008 FP.
                   Eventually this should be replaced by a real CPU model. */
         .name = "MIPS64R6-generic",
         .CP0_PRid = 0x00010000,
--
1.7.5.4
* Re: [Qemu-devel] [PATCH 2/3] target-mips: Misaligned Memory Accesses for R6
From: Peter Maydell
Date: 2015-05-01 15:39 UTC
To: Yongbok Kim; +Cc: Leon Alrae, QEMU Developers

On 1 May 2015 at 16:24, Yongbok Kim <yongbok.kim@imgtec.com> wrote:
> Release 6 requires misaligned memory access support for all ordinary memory
> access instructions (for example, LW/SW, LWC1/SWC1).
> However, misaligned support is not provided for certain special memory
> accesses such as atomics (for example, LL/SC).
>
> In the mips_cpu_do_unaligned_access() callback, if it is an R6 core, it
> further checks whether the address is valid.
>
> Signed-off-by: Yongbok Kim <yongbok.kim@imgtec.com>
> ---
>  target-mips/cpu.h            |    2 ++
>  target-mips/helper.c         |   36 ++++++++++++++++++++++++++++++++++++
>  target-mips/op_helper.c      |   13 +++++++++++++
>  target-mips/translate_init.c |    2 +-
>  4 files changed, 52 insertions(+), 1 deletions(-)
>
> diff --git a/target-mips/cpu.h b/target-mips/cpu.h
> index f9d2b4c..6586d89 100644
> --- a/target-mips/cpu.h
> +++ b/target-mips/cpu.h
> @@ -760,6 +760,8 @@ int mips_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
>  void r4k_invalidate_tlb (CPUMIPSState *env, int idx, int use_extra);
>  hwaddr cpu_mips_translate_address (CPUMIPSState *env, target_ulong address,
>                                     int rw);
> +bool cpu_mips_validate_access(CPUMIPSState *env, target_ulong address,
> +        target_ulong badvaddr, unsigned data_size, int rw);
>  #endif
>  target_ulong exception_resume_pc (CPUMIPSState *env);
>
> diff --git a/target-mips/helper.c b/target-mips/helper.c
> index 8e3204a..6c44124 100644
> --- a/target-mips/helper.c
> +++ b/target-mips/helper.c
> @@ -391,6 +391,42 @@ hwaddr cpu_mips_translate_address(CPUMIPSState *env, target_ulong address, int r
>  }
>  }
>
> +bool cpu_mips_validate_access(CPUMIPSState *env, target_ulong address,
> +        target_ulong badvaddr, unsigned data_size, int rw)
> +{
> +    hwaddr physical;
> +    int prot;
> +    int access_type = ACCESS_INT;
> +    int ret;
> +    target_ulong addr;
> +
> +    addr = address & ~((target_ulong) data_size - 1);
> +    ret = get_physical_address(env, &physical, &prot,
> +                               addr, rw, access_type);

Isn't a recheck of the TLB on every unaligned access going to have
poor performance? If we know from the type of the insn whether an
unaligned access is OK or not there ought to be some way to avoid
doing the check at all...

-- PMM
* [Qemu-devel] [PATCH 3/3] target-mips: Misaligned Memory Accesses for MSA
From: Yongbok Kim
Date: 2015-05-01 15:24 UTC
To: qemu-devel; +Cc: Leon.Alrae

MIPS SIMD Architecture vector loads and stores require misalignment support.
MSA Memory access should work as an atomic operation. Therefore, it has to
check validity of all the addresses for the operation.

Signed-off-by: Yongbok Kim <yongbok.kim@imgtec.com>
---
 target-mips/op_helper.c |   30 ++++++++++++++++++++++++++++++
 1 files changed, 30 insertions(+), 0 deletions(-)

diff --git a/target-mips/op_helper.c b/target-mips/op_helper.c
index dacc92b..89a7de6 100644
--- a/target-mips/op_helper.c
+++ b/target-mips/op_helper.c
@@ -3571,6 +3571,24 @@ FOP_CONDN_S(sne, (float32_lt(fst1, fst0, &env->active_fpu.fp_status)
 /* Element-by-element access macros */
 #define DF_ELEMENTS(df) (MSA_WRLEN / DF_BITS(df))
 
+#if !defined(CONFIG_USER_ONLY)
+static bool cpu_mips_validate_msa_block_access(CPUMIPSState *env,
+        target_ulong address, int df, int rw)
+{
+    int i;
+    for (i = 0; i < DF_ELEMENTS(df); i++) {
+        if (!cpu_mips_validate_access(env, address + (i << df),
+                address, (1 << df), rw)) {
+            CPUState *cs = CPU(mips_env_get_cpu(env));
+            helper_raise_exception_err(env, cs->exception_index,
+                                       env->error_code);
+            return false;
+        }
+    }
+    return true;
+}
+#endif
+
 void helper_msa_ld_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t rs,
                       int32_t s10)
 {
@@ -3578,6 +3596,12 @@ void helper_msa_ld_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t rs,
     target_ulong addr = env->active_tc.gpr[rs] + (s10 << df);
     int i;
 
+#if !defined(CONFIG_USER_ONLY)
+    if (!cpu_mips_validate_msa_block_access(env, addr, df, MMU_DATA_LOAD)) {
+        return;
+    }
+#endif
+
     switch (df) {
     case DF_BYTE:
         for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) {
@@ -3613,6 +3637,12 @@ void helper_msa_st_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t rs,
     target_ulong addr = env->active_tc.gpr[rs] + (s10 << df);
     int i;
 
+#if !defined(CONFIG_USER_ONLY)
+    if (!cpu_mips_validate_msa_block_access(env, addr, df, MMU_DATA_STORE)) {
+        return;
+    }
+#endif
+
     switch (df) {
     case DF_BYTE:
         for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) {
--
1.7.5.4
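For reference, a standalone sketch (not part of the patch) of the element
geometry the MSA load/store helpers iterate over; the 128-bit MSA vector
width and the usual df encoding of 0..3 are assumed:

#include <stdio.h>

/* Standalone sketch: element size and element count per MSA data format.
 * Assumes 128-bit MSA vector registers and the usual df encoding
 * (0 = byte, 1 = halfword, 2 = word, 3 = doubleword). */
#define MSA_WRLEN 128

int main(void)
{
    for (int df = 0; df <= 3; df++) {
        int elem_bytes = 1 << df;               /* 1, 2, 4, 8 bytes     */
        int elems = MSA_WRLEN / (8 << df);      /* 16, 8, 4, 2 elements */
        /* Element i of a vector at addr is accessed at addr + (i << df). */
        printf("df=%d: %2d elements of %d byte(s)\n", df, elems, elem_bytes);
    }
    return 0;
}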
* Re: [Qemu-devel] [PATCH 3/3] target-mips: Misaligned Memory Accesses for MSA
From: Peter Maydell
Date: 2015-05-01 15:43 UTC
To: Yongbok Kim; +Cc: Leon Alrae, QEMU Developers

On 1 May 2015 at 16:24, Yongbok Kim <yongbok.kim@imgtec.com> wrote:
> MIPS SIMD Architecture vector loads and stores require misalignment support.
> MSA Memory access should work as an atomic operation. Therefore, it has to
> check validity of all the addresses for the operation.
>
> Signed-off-by: Yongbok Kim <yongbok.kim@imgtec.com>
> ---
>  target-mips/op_helper.c |   30 ++++++++++++++++++++++++++++++
>  1 files changed, 30 insertions(+), 0 deletions(-)
>
> diff --git a/target-mips/op_helper.c b/target-mips/op_helper.c
> index dacc92b..89a7de6 100644
> --- a/target-mips/op_helper.c
> +++ b/target-mips/op_helper.c
> @@ -3571,6 +3571,24 @@ FOP_CONDN_S(sne, (float32_lt(fst1, fst0, &env->active_fpu.fp_status)
>  /* Element-by-element access macros */
>  #define DF_ELEMENTS(df) (MSA_WRLEN / DF_BITS(df))
>
> +#if !defined(CONFIG_USER_ONLY)
> +static bool cpu_mips_validate_msa_block_access(CPUMIPSState *env,
> +        target_ulong address, int df, int rw)
> +{
> +    int i;
> +    for (i = 0; i < DF_ELEMENTS(df); i++) {
> +        if (!cpu_mips_validate_access(env, address + (i << df),
> +                address, (1 << df), rw)) {
> +            CPUState *cs = CPU(mips_env_get_cpu(env));
> +            helper_raise_exception_err(env, cs->exception_index,
> +                                       env->error_code);

I was wondering if this would get the correct PC in the exception
case, but we always call save_cpu_state() before calling the
msa_ld/st_df helpers, so it will.

-- PMM
* Re: [Qemu-devel] [PATCH 3/3] target-mips: Misaligned Memory Accesses for MSA
From: Yongbok Kim
Date: 2015-05-01 15:55 UTC
To: Peter Maydell; +Cc: Leon Alrae, QEMU Developers

On 01/05/2015 16:43, Peter Maydell wrote:
> On 1 May 2015 at 16:24, Yongbok Kim <yongbok.kim@imgtec.com> wrote:
>> MIPS SIMD Architecture vector loads and stores require misalignment support.
>> MSA Memory access should work as an atomic operation. Therefore, it has to
>> check validity of all the addresses for the operation.
>>
>> Signed-off-by: Yongbok Kim <yongbok.kim@imgtec.com>
>> ---
>>  target-mips/op_helper.c |   30 ++++++++++++++++++++++++++++++
>>  1 files changed, 30 insertions(+), 0 deletions(-)
>>
>> diff --git a/target-mips/op_helper.c b/target-mips/op_helper.c
>> index dacc92b..89a7de6 100644
>> --- a/target-mips/op_helper.c
>> +++ b/target-mips/op_helper.c
>> @@ -3571,6 +3571,24 @@ FOP_CONDN_S(sne, (float32_lt(fst1, fst0, &env->active_fpu.fp_status)
>>  /* Element-by-element access macros */
>>  #define DF_ELEMENTS(df) (MSA_WRLEN / DF_BITS(df))
>>
>> +#if !defined(CONFIG_USER_ONLY)
>> +static bool cpu_mips_validate_msa_block_access(CPUMIPSState *env,
>> +        target_ulong address, int df, int rw)
>> +{
>> +    int i;
>> +    for (i = 0; i < DF_ELEMENTS(df); i++) {
>> +        if (!cpu_mips_validate_access(env, address + (i << df),
>> +                address, (1 << df), rw)) {
>> +            CPUState *cs = CPU(mips_env_get_cpu(env));
>> +            helper_raise_exception_err(env, cs->exception_index,
>> +                                       env->error_code);
> I was wondering if this would get the correct PC in the exception
> case, but we always call save_cpu_state() before calling the
> msa_ld/st_df helpers, so it will.
>
> -- PMM

Yes, it does because of the save_cpu_state().
Actually, I considered using cpu_restore_state() with GETRA(), but using
save_cpu_state() is quite common in target-mips. It would be good to clean
that up for all the cases as future work; in this patch I would rather
follow the existing style for consistency.

Regards,
Yongbok
* Re: [Qemu-devel] [PATCH 3/3] target-mips: Misaligned Memory Accesses for MSA
From: Leon Alrae
Date: 2015-05-05 14:51 UTC
To: Peter Maydell, Yongbok Kim; +Cc: QEMU Developers

On 01/05/2015 16:43, Peter Maydell wrote:
>> +#if !defined(CONFIG_USER_ONLY)
>> +static bool cpu_mips_validate_msa_block_access(CPUMIPSState *env,
>> +        target_ulong address, int df, int rw)
>> +{
>> +    int i;
>> +    for (i = 0; i < DF_ELEMENTS(df); i++) {
>> +        if (!cpu_mips_validate_access(env, address + (i << df),
>> +                address, (1 << df), rw)) {
>> +            CPUState *cs = CPU(mips_env_get_cpu(env));
>> +            helper_raise_exception_err(env, cs->exception_index,
>> +                                       env->error_code);
>
> I was wondering if this would get the correct PC in the exception
> case, but we always call save_cpu_state() before calling the
> msa_ld/st_df helpers, so it will.

FYI, quite recently Richard suggested a clean-up for this (and all other
MIPS load/store instructions using helpers), so save_cpu_state() wouldn’t
be required. I haven’t got round to it yet but it sounds like a nice
improvement.

Leon
* Re: [Qemu-devel] [PATCH 3/3] target-mips: Misaligned Memory Accesses for MSA
From: Leon Alrae
Date: 2015-05-05 19:54 UTC
To: Yongbok Kim; +Cc: qemu-devel

On 01/05/15 16:24, Yongbok Kim wrote:
> MIPS SIMD Architecture vector loads and stores require misalignment support.
> MSA Memory access should work as an atomic operation. Therefore, it has to
> check validity of all the addresses for the operation.

As far as I can tell, mips_cpu_do_unaligned_access() will still be
generating AdE exceptions for MSA loads/stores in MIPS R5, which doesn't
seem to be correct.

>
> Signed-off-by: Yongbok Kim <yongbok.kim@imgtec.com>
> ---
>  target-mips/op_helper.c |   30 ++++++++++++++++++++++++++++++
>  1 files changed, 30 insertions(+), 0 deletions(-)
>
> diff --git a/target-mips/op_helper.c b/target-mips/op_helper.c
> index dacc92b..89a7de6 100644
> --- a/target-mips/op_helper.c
> +++ b/target-mips/op_helper.c
> @@ -3571,6 +3571,24 @@ FOP_CONDN_S(sne, (float32_lt(fst1, fst0, &env->active_fpu.fp_status)
>  /* Element-by-element access macros */
>  #define DF_ELEMENTS(df) (MSA_WRLEN / DF_BITS(df))
>
> +#if !defined(CONFIG_USER_ONLY)
> +static bool cpu_mips_validate_msa_block_access(CPUMIPSState *env,
> +        target_ulong address, int df, int rw)
> +{
> +    int i;
> +    for (i = 0; i < DF_ELEMENTS(df); i++) {

Do we really need to check each element, wouldn't byte 0 and byte
(MSA_WRLEN/8 - 1) be enough?

> +        if (!cpu_mips_validate_access(env, address + (i << df),
> +                address, (1 << df), rw)) {

I believe this would look better if this line was aligned with the first
argument of the function (and would be consistent with the helper below).

> +            CPUState *cs = CPU(mips_env_get_cpu(env));
> +            helper_raise_exception_err(env, cs->exception_index,
> +                                       env->error_code);
> +            return false;
> +        }
> +    }
> +    return true;
> +}
> +#endif
> +
>  void helper_msa_ld_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t rs,
>                        int32_t s10)
>  {
> @@ -3578,6 +3596,12 @@ void helper_msa_ld_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t rs,
>      target_ulong addr = env->active_tc.gpr[rs] + (s10 << df);
>      int i;
>
> +#if !defined(CONFIG_USER_ONLY)
> +    if (!cpu_mips_validate_msa_block_access(env, addr, df, MMU_DATA_LOAD)) {
> +        return;
> +    }

Shouldn't this be called only for cases where a page boundary is crossed?
Otherwise I don't think this validation is needed.

> +#endif
> +
>      switch (df) {
>      case DF_BYTE:
>          for (i = 0; i < DF_ELEMENTS(DF_BYTE); i++) {
> @@ -3613,6 +3637,12 @@ void helper_msa_st_df(CPUMIPSState *env, uint32_t df, uint32_t wd, uint32_t rs,
>      target_ulong addr = env->active_tc.gpr[rs] + (s10 << df);
>      int i;
>
> +#if !defined(CONFIG_USER_ONLY)
> +    if (!cpu_mips_validate_msa_block_access(env, addr, df, MMU_DATA_STORE)) {
> +        return;
> +    }

Same.

Thanks,
Leon
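A minimal sketch of the narrower check Leon suggests (skip validation when
the vector does not cross a page boundary, and otherwise probe one byte in
each of the two pages) might look as follows. This is illustrative only and
not part of the series; the function name is made up, while the types and
cpu_mips_validate_access() are the ones introduced earlier in the series:

/* Illustrative sketch only: validate an MSA vector access by probing one
 * byte in each page it touches, and skip the probe entirely when the whole
 * vector fits in a single page.  validate_msa_access_sketch() is a made-up
 * name; CPUMIPSState, target_ulong, TARGET_PAGE_MASK, MSA_WRLEN and
 * cpu_mips_validate_access() are the definitions used by this series. */
static bool validate_msa_access_sketch(CPUMIPSState *env, target_ulong addr,
                                       int rw)
{
    target_ulong last = addr + (MSA_WRLEN / 8) - 1;   /* last byte of vector */

    if ((addr & TARGET_PAGE_MASK) == (last & TARGET_PAGE_MASK)) {
        /* No page crossing: the normal softmmu path faults (or not) on its
         * own, so no up-front validation is needed. */
        return true;
    }
    /* Page crossing: check one byte in each of the two pages before doing
     * any part of the access, so the operation stays all-or-nothing. */
    return cpu_mips_validate_access(env, addr, addr, 1, rw) &&
           cpu_mips_validate_access(env, last, addr, 1, rw);
}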