From mboxrd@z Thu Jan 1 00:00:00 1970 From: R Sricharan Date: Tue, 9 Oct 2012 15:53:43 +0530 Subject: [U-Boot] [PATCH v5 09/16] arm: Add control over cachability of memory regions In-Reply-To: <1349732556-30700-10-git-send-email-sjg@chromium.org> References: <1349732556-30700-1-git-send-email-sjg@chromium.org> <1349732556-30700-10-git-send-email-sjg@chromium.org> Message-ID: <5073FB2F.7010600@ti.com> List-Id: MIME-Version: 1.0 Content-Type: text/plain; charset="us-ascii" Content-Transfer-Encoding: 7bit To: u-boot@lists.denx.de Hi, > Add support for adjusting the cachability of an L1 section by updating > the MMU. The mmu_set_region_dcache() function allows drivers to make > these changes after the MMU is set up. > > It is implemented only for ARMv7 at present. > > This is needed for LCD support, where we want to make the LCD frame buffer > write-through (or off) rather than write-back. > This is much-needed support. I was looking for a way to change the permissions to avoid speculative aborts for non-executable regions. This should help. 
> Signed-off-by: Simon Glass > --- > Changes in v4: > - Use system bit values for enum dcache_option instead of arbitrary numbers > > arch/arm/cpu/armv7/cache_v7.c | 11 +++++++++ > arch/arm/include/asm/system.h | 30 ++++++++++++++++++++++++ > arch/arm/lib/cache-cp15.c | 50 ++++++++++++++++++++++++++++++++--------- > 3 files changed, 80 insertions(+), 11 deletions(-) > > diff --git a/arch/arm/cpu/armv7/cache_v7.c b/arch/arm/cpu/armv7/cache_v7.c > index 1b4e808..5f6d039 100644 > --- a/arch/arm/cpu/armv7/cache_v7.c > +++ b/arch/arm/cpu/armv7/cache_v7.c > @@ -297,6 +297,12 @@ void arm_init_before_mmu(void) > v7_inval_tlb(); > } > > +void mmu_page_table_flush(unsigned long start, unsigned long stop) > +{ > + flush_dcache_range(start, stop); > + v7_inval_tlb(); > +} > + > /* > * Flush range from all levels of d-cache/unified-cache used: > * Affects the range [start, start + size - 1] > @@ -329,6 +335,11 @@ void arm_init_before_mmu(void) > void flush_cache(unsigned long start, unsigned long size) > { > } > + > +void mmu_page_table_flush(unsigned long start, unsigned long stop) > +{ > +} > + > #endif /* #ifndef CONFIG_SYS_DCACHE_OFF */ > > #ifndef CONFIG_SYS_ICACHE_OFF > diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h > index 2b28a26..9a05211 100644 > --- a/arch/arm/include/asm/system.h > +++ b/arch/arm/include/asm/system.h > @@ -75,6 +75,36 @@ static inline void set_cr(unsigned int val) > isb(); > } > > +/* options available for data cache on each page */ > +enum dcache_option { > + DCACHE_OFF = 0x12, > + DCACHE_WRITETHROUGH = 0x1a, > + DCACHE_WRITEBACK = 0x1e, > +}; > + > +/* Size of an MMU section */ > +enum { > + MMU_SECTION_SHIFT = 20, > + MMU_SECTION_SIZE = 1 << MMU_SECTION_SHIFT, > +}; > + > +/** > + * Change the cache settings for a region. 
> + * > + * \param start start address of memory region to change > + * \param size size of memory region to change > + * \param option dcache option to select > + */ > +void mmu_set_region_dcache(u32 start, int size, enum dcache_option option); > + > +/** > + * Register an update to the page tables, and flush the TLB > + * > + * \param start start address of update in page table > + * \param stop stop address of update in page table > + */ > +void mmu_page_table_flush(unsigned long start, unsigned long stop); > + > #endif /* __ASSEMBLY__ */ > > #define arch_align_stack(x) (x) > diff --git a/arch/arm/lib/cache-cp15.c b/arch/arm/lib/cache-cp15.c > index 939de10..c9bee42 100644 > --- a/arch/arm/lib/cache-cp15.c > +++ b/arch/arm/lib/cache-cp15.c > @@ -26,12 +26,6 @@ > > #if !(defined(CONFIG_SYS_ICACHE_OFF) && defined(CONFIG_SYS_DCACHE_OFF)) > > -#if defined(CONFIG_SYS_ARM_CACHE_WRITETHROUGH) > -#define CACHE_SETUP 0x1a > -#else > -#define CACHE_SETUP 0x1e > -#endif > - > DECLARE_GLOBAL_DATA_PTR; > > void __arm_init_before_mmu(void) > @@ -50,9 +44,40 @@ static void cp_delay (void) > asm volatile("" : : : "memory"); > } > > -static inline void dram_bank_mmu_setup(int bank) > +void set_section_dcache(int section, enum dcache_option option) > { > u32 *page_table = (u32 *)gd->tlb_addr; > + u32 value; > + > + value = (section << MMU_SECTION_SHIFT) | (3 << 10); Nit: how about avoiding this magic value (3 << 10)? 
> + value |= option; > + page_table[section] = value; > +} > + > +void __mmu_page_table_flush(unsigned long start, unsigned long stop) > +{ > + debug("%s: Warning: not implemented\n", __func__); > +} > + > +void mmu_page_table_flush(unsigned long start, unsigned long stop) > + __attribute__((weak, alias("__mmu_page_table_flush"))); > + > +void mmu_set_region_dcache(u32 start, int size, enum dcache_option option) > +{ > + u32 *page_table = (u32 *)gd->tlb_addr; > + u32 upto, end; > + > + end = ALIGN(start + size, MMU_SECTION_SIZE) >> MMU_SECTION_SHIFT; > + start = start >> MMU_SECTION_SHIFT; > + debug("mmu_set_region_dcache start=%x, size=%x, option=%d\n", > + start, size, option); > + for (upto = start; upto < end; upto++) > + set_section_dcache(upto, option); > + mmu_page_table_flush((u32)&page_table[start], (u32)&page_table[end]); > +} Should this be page_table[end + 1] instead, to ensure that the last entry is also flushed?