* [PATCH v2 0/8] Patches for 2.6.35
From: Catalin Marinas @ 2010-06-02 17:02 UTC
  To: linux-arm-kernel

These are a set of patches I'd like to get merged into 2.6.35 during
the -rc period.

The first one was posted some time ago but was deferred because of
another, conflicting patch (the 16-way PL310 support).

The remaining patches are necessary fixes, plus a few trivial ones.

Thanks.


Catalin Marinas (8):
      ARM: Improve the L2 cache performance when PL310 is used
      ARM: Align machine_desc.phys_io to a 1MB section
      ARM: Avoid the CONSISTENT_DMA_SIZE warning on noMMU builds
      ARM: The v6_dma_inv_range() function must preserve data on SMP
      ARM: Add a config option for the ARM11MPCore DMA cache maintenance workaround
      ARM: Add support for the MOVW/MOVT relocations in Thumb-2
      ARM: Remove dummy loads from the original relocation address
      ARM: Do not compile the Thumb-2 module relocations on an ARM kernel


 arch/arm/include/asm/elf.h |    2 +
 arch/arm/kernel/head.S     |    2 +
 arch/arm/kernel/module.c   |   34 +++++++++++++++++++++++
 arch/arm/mm/Kconfig        |   26 ++++++++++++++++++
 arch/arm/mm/cache-l2x0.c   |   65 ++++++++++++++++++++++++++++++--------------
 arch/arm/mm/cache-v6.S     |   18 +++++++++---
 arch/arm/mm/dma-mapping.c  |    2 +
 7 files changed, 124 insertions(+), 25 deletions(-)

-- 
Catalin


* [PATCH v2 1/8] ARM: Improve the L2 cache performance when PL310 is used
From: Catalin Marinas @ 2010-06-02 17:02 UTC
  To: linux-arm-kernel

With the PL310 L2 cache controller, the cache maintenance operations
by PA and the cache sync are atomic and do not require a "wait" loop
or spinlocks. This patch conditionally defines the cache_wait()
function and the locking primitives as no-ops (rather than duplicating
the functions or the file).

Since ARMv7 CPUs are only paired with the PL310 variant of the L2x0
cache controllers, the patch enables CACHE_PL310 by default when
CPU_V7 is defined.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
---
 arch/arm/mm/Kconfig      |    7 +++++
 arch/arm/mm/cache-l2x0.c |   65 +++++++++++++++++++++++++++++++---------------
 2 files changed, 51 insertions(+), 21 deletions(-)

diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 346ae14..521f3cc 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -770,6 +770,13 @@ config CACHE_L2X0
 	help
 	  This option enables the L2x0 PrimeCell.
 
+config CACHE_PL310
+	bool
+	depends on CACHE_L2X0
+	default y if CPU_V7
+	help
 +	  This option enables optimisations for the PL310 cache controller.
+
 config CACHE_TAUROS2
 	bool "Enable the Tauros2 L2 cache controller"
 	depends on (ARCH_DOVE || ARCH_MMP)
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 9819869..e4fa95c 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -26,16 +26,39 @@
 #define CACHE_LINE_SIZE		32
 
 static void __iomem *l2x0_base;
-static DEFINE_SPINLOCK(l2x0_lock);
 static uint32_t l2x0_way_mask;	/* Bitmask of active ways */
 
-static inline void cache_wait(void __iomem *reg, unsigned long mask)
+static inline void cache_wait_always(void __iomem *reg, unsigned long mask)
 {
 	/* wait for the operation to complete */
 	while (readl(reg) & mask)
 		;
 }
 
+#ifdef CONFIG_CACHE_PL310
+
+static inline void cache_wait(void __iomem *reg, unsigned long mask)
+{
+	/* cache operations are atomic */
+}
+
+#define _l2x0_lock(lock, flags)		((void)(flags))
+#define _l2x0_unlock(lock, flags)	((void)(flags))
+
+#define block_end(start, end)		(end)
+
+#else	/* !CONFIG_CACHE_PL310 */
+
+#define cache_wait			cache_wait_always
+
+static DEFINE_SPINLOCK(l2x0_lock);
+#define _l2x0_lock(lock, flags)		spin_lock_irqsave(lock, flags)
+#define _l2x0_unlock(lock, flags)	spin_unlock_irqrestore(lock, flags)
+
+#define block_end(start, end)		((start) + min((end) - (start), 4096UL))
+
+#endif	/* CONFIG_CACHE_PL310 */
+
 static inline void cache_sync(void)
 {
 	void __iomem *base = l2x0_base;
@@ -98,9 +121,9 @@ static void l2x0_cache_sync(void)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&l2x0_lock, flags);
+	_l2x0_lock(&l2x0_lock, flags);
 	cache_sync();
-	spin_unlock_irqrestore(&l2x0_lock, flags);
+	_l2x0_unlock(&l2x0_lock, flags);
 }
 
 static inline void l2x0_inv_all(void)
@@ -108,11 +131,11 @@ static inline void l2x0_inv_all(void)
 	unsigned long flags;
 
 	/* invalidate all ways */
-	spin_lock_irqsave(&l2x0_lock, flags);
+	_l2x0_lock(&l2x0_lock, flags);
 	writel(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
 	cache_wait(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
 	cache_sync();
-	spin_unlock_irqrestore(&l2x0_lock, flags);
+	_l2x0_unlock(&l2x0_lock, flags);
 }
 
 static void l2x0_inv_range(unsigned long start, unsigned long end)
@@ -120,7 +143,7 @@ static void l2x0_inv_range(unsigned long start, unsigned long end)
 	void __iomem *base = l2x0_base;
 	unsigned long flags;
 
-	spin_lock_irqsave(&l2x0_lock, flags);
+	_l2x0_lock(&l2x0_lock, flags);
 	if (start & (CACHE_LINE_SIZE - 1)) {
 		start &= ~(CACHE_LINE_SIZE - 1);
 		debug_writel(0x03);
@@ -137,7 +160,7 @@ static void l2x0_inv_range(unsigned long start, unsigned long end)
 	}
 
 	while (start < end) {
-		unsigned long blk_end = start + min(end - start, 4096UL);
+		unsigned long blk_end = block_end(start, end);
 
 		while (start < blk_end) {
 			l2x0_inv_line(start);
@@ -145,13 +168,13 @@ static void l2x0_inv_range(unsigned long start, unsigned long end)
 		}
 
 		if (blk_end < end) {
-			spin_unlock_irqrestore(&l2x0_lock, flags);
-			spin_lock_irqsave(&l2x0_lock, flags);
+			_l2x0_unlock(&l2x0_lock, flags);
+			_l2x0_lock(&l2x0_lock, flags);
 		}
 	}
 	cache_wait(base + L2X0_INV_LINE_PA, 1);
 	cache_sync();
-	spin_unlock_irqrestore(&l2x0_lock, flags);
+	_l2x0_unlock(&l2x0_lock, flags);
 }
 
 static void l2x0_clean_range(unsigned long start, unsigned long end)
@@ -159,10 +182,10 @@ static void l2x0_clean_range(unsigned long start, unsigned long end)
 	void __iomem *base = l2x0_base;
 	unsigned long flags;
 
-	spin_lock_irqsave(&l2x0_lock, flags);
+	_l2x0_lock(&l2x0_lock, flags);
 	start &= ~(CACHE_LINE_SIZE - 1);
 	while (start < end) {
-		unsigned long blk_end = start + min(end - start, 4096UL);
+		unsigned long blk_end = block_end(start, end);
 
 		while (start < blk_end) {
 			l2x0_clean_line(start);
@@ -170,13 +193,13 @@ static void l2x0_clean_range(unsigned long start, unsigned long end)
 		}
 
 		if (blk_end < end) {
-			spin_unlock_irqrestore(&l2x0_lock, flags);
-			spin_lock_irqsave(&l2x0_lock, flags);
+			_l2x0_unlock(&l2x0_lock, flags);
+			_l2x0_lock(&l2x0_lock, flags);
 		}
 	}
 	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
 	cache_sync();
-	spin_unlock_irqrestore(&l2x0_lock, flags);
+	_l2x0_unlock(&l2x0_lock, flags);
 }
 
 static void l2x0_flush_range(unsigned long start, unsigned long end)
@@ -184,10 +207,10 @@ static void l2x0_flush_range(unsigned long start, unsigned long end)
 	void __iomem *base = l2x0_base;
 	unsigned long flags;
 
-	spin_lock_irqsave(&l2x0_lock, flags);
+	_l2x0_lock(&l2x0_lock, flags);
 	start &= ~(CACHE_LINE_SIZE - 1);
 	while (start < end) {
-		unsigned long blk_end = start + min(end - start, 4096UL);
+		unsigned long blk_end = block_end(start, end);
 
 		debug_writel(0x03);
 		while (start < blk_end) {
@@ -197,13 +220,13 @@ static void l2x0_flush_range(unsigned long start, unsigned long end)
 		debug_writel(0x00);
 
 		if (blk_end < end) {
-			spin_unlock_irqrestore(&l2x0_lock, flags);
-			spin_lock_irqsave(&l2x0_lock, flags);
+			_l2x0_unlock(&l2x0_lock, flags);
+			_l2x0_lock(&l2x0_lock, flags);
 		}
 	}
 	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
 	cache_sync();
-	spin_unlock_irqrestore(&l2x0_lock, flags);
+	_l2x0_unlock(&l2x0_lock, flags);
 }
 
 void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
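
For reference, here is a minimal standalone sketch of the pattern
introduced above (illustration only, not the kernel code;
ATOMIC_CACHE_OPS, range_lock(), op_line() and op_range() are made-up
names): the same loop body serves both configurations, and the macros
decide whether the range is covered in a single lock-free pass or in
4096-byte blocks with the lock dropped between blocks.

/*
 * With ATOMIC_CACHE_OPS (standing in for CONFIG_CACHE_PL310) the
 * lock/unlock macros compile away and block_end() returns the end of
 * the whole range; otherwise the range is processed in 4096-byte
 * blocks so that interrupts are not held off for too long.
 */
#define ATOMIC_CACHE_OPS	1

#if ATOMIC_CACHE_OPS
#define range_lock(flags)	((void)(flags))
#define range_unlock(flags)	((void)(flags))
#define block_end(start, end)	(end)
#else
#define range_lock(flags)	((void)(flags))	/* spin_lock_irqsave() in the real code */
#define range_unlock(flags)	((void)(flags))	/* spin_unlock_irqrestore() in the real code */
#define block_end(start, end)	\
	((start) + ((end) - (start) < 4096UL ? (end) - (start) : 4096UL))
#endif

static void op_line(unsigned long addr)
{
	(void)addr;			/* one per-cache-line maintenance operation */
}

static void op_range(unsigned long start, unsigned long end)
{
	unsigned long flags = 0;

	range_lock(flags);
	while (start < end) {
		unsigned long blk_end = block_end(start, end);

		while (start < blk_end) {
			op_line(start);
			start += 32;	/* CACHE_LINE_SIZE */
		}
		if (blk_end < end) {	/* give pending interrupts a chance */
			range_unlock(flags);
			range_lock(flags);
		}
	}
	range_unlock(flags);
}

int main(void)
{
	op_range(0x80000000UL, 0x80002000UL);	/* e.g. an 8KB range */
	return 0;
}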


* [PATCH v2 2/8] ARM: Align machine_desc.phys_io to a 1MB section
From: Catalin Marinas @ 2010-06-02 17:02 UTC
  To: linux-arm-kernel

Platforms like RealView do not pass a section-aligned (1MB) phys_io
address via the machine_desc structure. This patch aligns the address
in the __create_page_tables function. Reported by Tony Thompson.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
---
 arch/arm/kernel/head.S |    2 ++
 1 files changed, 2 insertions(+), 0 deletions(-)

diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index eb62bf9..82ea924 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -302,6 +302,8 @@ __create_page_tables:
 	movhi	r3, #0x0800
 	add	r6, r0, r3
 	ldr	r3, [r8, #MACHINFO_PHYSIO]
+	mov	r3, r3, lsr #20			@ 1MB-aligned address
+	mov	r3, r3, lsl #20
 	orr	r3, r3, r7
 1:	str	r3, [r0], #4
 	add	r3, r3, #1 << 20
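
For reference, the logical shift right/left by 20 above simply rounds
the phys_io address down to a 1MB section boundary. A standalone C
illustration (not kernel code; the example address is made up):

#include <stdio.h>

int main(void)
{
	unsigned long phys_io = 0x10009000UL;		/* unaligned example value */
	unsigned long shifted = (phys_io >> 20) << 20;	/* what the two MOVs do */
	unsigned long masked  = phys_io & ~((1UL << 20) - 1);

	/* both print 0x10000000, the start of the containing 1MB section */
	printf("%#lx %#lx\n", shifted, masked);
	return 0;
}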


* [PATCH v2 3/8] ARM: Avoid the CONSISTENT_DMA_SIZE warning on noMMU builds
From: Catalin Marinas @ 2010-06-02 17:02 UTC
  To: linux-arm-kernel

The CONSISTENT_DMA_SIZE macro is not defined when !CONFIG_MMU, so
guard the corresponding sanity check and definitions with #ifdef
CONFIG_MMU.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
---
 arch/arm/mm/dma-mapping.c |    2 ++
 1 files changed, 2 insertions(+), 0 deletions(-)

diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 13fa536..c4de90f 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -25,6 +25,7 @@
 #include <asm/sizes.h>
 
 /* Sanity check size */
+#ifdef CONFIG_MMU
 #if (CONSISTENT_DMA_SIZE % SZ_2M)
 #error "CONSISTENT_DMA_SIZE must be multiple of 2MiB"
 #endif
@@ -32,6 +33,7 @@
 #define CONSISTENT_OFFSET(x)	(((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
 #define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT)
 #define NUM_CONSISTENT_PTES (CONSISTENT_DMA_SIZE >> PGDIR_SHIFT)
+#endif
 
 static u64 get_coherent_dma_mask(struct device *dev)
 {


* [PATCH v2 4/8] ARM: The v6_dma_inv_range() function must preserve data on SMP
From: Catalin Marinas @ 2010-06-02 17:02 UTC
  To: linux-arm-kernel

A recent patch for DMA cache maintenance on ARM11MPCore added a
write-for-ownership trick to the v6_dma_inv_range() function. Such an
operation destroys data already present in the buffer. However, this
function is used with dma_sync_single_for_device(), which is supposed
to preserve the existing data transferred into the buffer. This patch
adds a combination of read/write for ownership to preserve the
original data.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Reported-by: Ronen Shitrit <rshitrit@marvell.com>
---
 arch/arm/mm/cache-v6.S |    3 ++-
 1 files changed, 2 insertions(+), 1 deletions(-)

diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index e46ecd8..332b48c 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -212,7 +212,8 @@ v6_dma_inv_range:
 #endif
 1:
 #ifdef CONFIG_SMP
-	str	r0, [r0]			@ write for ownership
+	ldr	r2, [r0]			@ read for ownership
+	str	r2, [r0]			@ write for ownership
 #endif
 #ifdef HARVARD_CACHE
 	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D line
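
As a rough C analogue of the LDR/STR pair above (illustration only;
rwfo() is a made-up helper, the real operation is the assembly
sequence in this patch): reading the word and writing back the same
value pulls the cache line into the local CPU's cache in an owned
state without altering the buffer contents, whereas the previous
write-only trick stored the address register into the buffer and
clobbered data the device may already have placed there.

static inline void rwfo(volatile unsigned long *word)
{
	unsigned long tmp = *word;	/* read for ownership */

	*word = tmp;			/* write back the same value */
}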


* [PATCH v2 5/8] ARM: Add a config option for the ARM11MPCore DMA cache maintenance workaround
From: Catalin Marinas @ 2010-06-02 17:02 UTC
  To: linux-arm-kernel

Commit f4d6477f introduced a workaround for the lack of hardware
broadcasting of the cache maintenance operations on ARM11MPCore.
However, the workaround is only valid on CPUs that do not perform
speculative loads into the D-cache.

This patch adds a Kconfig option with the corresponding help text to
make the above clear. When the DMA_CACHE_RWFO option is disabled, the
kernel behaviour reverts to that prior to commit f4d6477f. This also
allows ARMv6 UP processors with speculative loads to work correctly.

For other processors, a different workaround may be needed.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Ronen Shitrit <rshitrit@marvell.com>
---
 arch/arm/mm/Kconfig    |   19 +++++++++++++++++++
 arch/arm/mm/cache-v6.S |   15 ++++++++++++---
 2 files changed, 31 insertions(+), 3 deletions(-)

diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 521f3cc..adfc663 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -735,6 +735,25 @@ config NEEDS_SYSCALL_FOR_CMPXCHG
 	  Forget about fast user space cmpxchg support.
 	  It is just not possible.
 
+config DMA_CACHE_RWFO
+	bool "Enable read/write for ownership DMA cache maintenance"
+	depends on CPU_V6 && SMP
+	default y
+	help
+	  The Snoop Control Unit on ARM11MPCore does not detect the
+	  cache maintenance operations and the dma_{map,unmap}_area()
+	  functions may leave stale cache entries on other CPUs. By
+	  enabling this option, Read or Write For Ownership in the ARMv6
+	  DMA cache maintenance functions is performed. These LDR/STR
+	  instructions change the cache line state to shared or modified
+	  so that the cache operation has the desired effect.
+
+	  Note that the workaround is only valid on processors that do
+	  not perform speculative loads into the D-cache. For such
+	  processors, if cache maintenance operations are not broadcast
+	  in hardware, other workarounds are needed (e.g. cache
+	  maintenance broadcasting in software via FIQ).
+
 config OUTER_CACHE
 	bool
 
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index 332b48c..86aa689 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -211,7 +211,7 @@ v6_dma_inv_range:
 	mcrne	p15, 0, r1, c7, c15, 1		@ clean & invalidate unified line
 #endif
 1:
-#ifdef CONFIG_SMP
+#ifdef CONFIG_DMA_CACHE_RWFO
 	ldr	r2, [r0]			@ read for ownership
 	str	r2, [r0]			@ write for ownership
 #endif
@@ -235,7 +235,7 @@ v6_dma_inv_range:
 v6_dma_clean_range:
 	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
 1:
-#ifdef CONFIG_SMP
+#ifdef CONFIG_DMA_CACHE_RWFO
 	ldr	r2, [r0]			@ read for ownership
 #endif
 #ifdef HARVARD_CACHE
@@ -258,7 +258,7 @@ v6_dma_clean_range:
 ENTRY(v6_dma_flush_range)
 	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
 1:
-#ifdef CONFIG_SMP
+#ifdef CONFIG_DMA_CACHE_RWFO
 	ldr	r2, [r0]			@ read for ownership
 	str	r2, [r0]			@ write for ownership
 #endif
@@ -284,9 +284,13 @@ ENTRY(v6_dma_map_area)
 	add	r1, r1, r0
 	teq	r2, #DMA_FROM_DEVICE
 	beq	v6_dma_inv_range
+#ifndef CONFIG_DMA_CACHE_RWFO
+	b	v6_dma_clean_range
+#else
 	teq	r2, #DMA_TO_DEVICE
 	beq	v6_dma_clean_range
 	b	v6_dma_flush_range
+#endif
 ENDPROC(v6_dma_map_area)
 
 /*
@@ -296,6 +300,11 @@ ENDPROC(v6_dma_map_area)
  *	- dir	- DMA direction
  */
 ENTRY(v6_dma_unmap_area)
+#ifndef CONFIG_DMA_CACHE_RWFO
+	add	r1, r1, r0
+	teq	r2, #DMA_TO_DEVICE
+	bne	v6_dma_inv_range
+#endif
 	mov	pc, lr
 ENDPROC(v6_dma_unmap_area)
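
To make the !CONFIG_DMA_CACHE_RWFO paths above easier to follow, here
is a rough C rendering of the direction dispatch performed by
v6_dma_map_area() and v6_dma_unmap_area() when the workaround is
disabled (illustration only; inv_range() and clean_range() are
stand-ins for the assembly routines):

enum data_direction { DMA_BIDIRECTIONAL, DMA_TO_DEVICE, DMA_FROM_DEVICE };

static void inv_range(unsigned long start, unsigned long end)   { /* v6_dma_inv_range */ }
static void clean_range(unsigned long start, unsigned long end) { /* v6_dma_clean_range */ }

/* called before the DMA transfer starts */
static void map_area(unsigned long start, unsigned long size, enum data_direction dir)
{
	if (dir == DMA_FROM_DEVICE)
		inv_range(start, start + size);		/* discard stale lines */
	else
		clean_range(start, start + size);	/* TO_DEVICE and BIDIRECTIONAL */
}

/* called after the DMA transfer completes */
static void unmap_area(unsigned long start, unsigned long size, enum data_direction dir)
{
	if (dir != DMA_TO_DEVICE)
		inv_range(start, start + size);	/* drop lines loaded speculatively during DMA */
}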
 


* [PATCH v2 6/8] ARM: Add support for the MOVW/MOVT relocations in Thumb-2
From: Catalin Marinas @ 2010-06-02 17:02 UTC
  To: linux-arm-kernel

This patch adds handling of the R_ARM_THM_MOVW_ABS_NC and
R_ARM_THM_MOVT_ABS relocations to arch/arm/kernel/module.c. Such
relocations may appear in kernel modules compiled for Thumb-2.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Reported-by: Kyungmin Park <kmpark@infradead.org>
---
 arch/arm/include/asm/elf.h |    2 ++
 arch/arm/kernel/module.c   |   32 ++++++++++++++++++++++++++++++++
 2 files changed, 34 insertions(+), 0 deletions(-)

diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index 51662fe..4d0e730 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -59,6 +59,8 @@ typedef struct user_fp elf_fpregset_t;
 
 #define R_ARM_THM_CALL		10
 #define R_ARM_THM_JUMP24	30
+#define R_ARM_THM_MOVW_ABS_NC	47
+#define R_ARM_THM_MOVT_ABS	48
 
 /*
  * These are used to set parameters in the core dumps.
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index c628bdf..ae3c804 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -237,6 +237,38 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
 			lower = *(u16 *)(loc + 2);
 			break;
 
+		case R_ARM_THM_MOVW_ABS_NC:
+		case R_ARM_THM_MOVT_ABS:
+			upper = *(u16 *)loc;
+			lower = *(u16 *)(loc + 2);
+
+			/*
+			 * MOVT/MOVW instructions encoding in Thumb-2:
+			 *
+			 * i	= upper[10]
+			 * imm4	= upper[3:0]
+			 * imm3	= lower[14:12]
+			 * imm8	= lower[7:0]
+			 *
+			 * imm16 = imm4:i:imm3:imm8
+			 */
+			offset = ((upper & 0x000f) << 12) |
+				((upper & 0x0400) << 1) |
+				((lower & 0x7000) >> 4) | (lower & 0x00ff);
+			offset = (offset ^ 0x8000) - 0x8000;
+			offset += sym->st_value;
+
+			if (ELF32_R_TYPE(rel->r_info) == R_ARM_THM_MOVT_ABS)
+				offset >>= 16;
+
+			*(u16 *)loc = (u16)((upper & 0xfbf0) |
+					    ((offset & 0xf000) >> 12) |
+					    ((offset & 0x0800) >> 1));
+			*(u16 *)(loc + 2) = (u16)((lower & 0x8f00) |
+						  ((offset & 0x0700) << 4) |
+						  (offset & 0x00ff));
+			break;
+
 		default:
 			printk(KERN_ERR "%s: unknown relocation: %u\n",
 			       module->name, ELF32_R_TYPE(rel->r_info));
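
As an aside, the (offset ^ 0x8000) - 0x8000 expression above is the
usual idiom for sign-extending a 16-bit value held in a wider signed
variable; a standalone illustration (plain C, not kernel code):

#include <stdio.h>

int main(void)
{
	int offset = 0xfffc;			/* 16-bit pattern of -4 */

	offset = (offset ^ 0x8000) - 0x8000;	/* sign-extend to 32 bits */
	printf("%d\n", offset);			/* prints -4 */
	return 0;
}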


* [PATCH v2 7/8] ARM: Remove dummy loads from the original relocation address
From: Catalin Marinas @ 2010-06-02 17:02 UTC
  To: linux-arm-kernel

Reading back the upper and lower values in the R_ARM_THM_CALL and
R_ARM_THM_JUMP24 case was introduced by a previous commit, but these
dummy loads are not needed; remove them.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
---
 arch/arm/kernel/module.c |    2 --
 1 files changed, 0 insertions(+), 2 deletions(-)

diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index ae3c804..aab7fca 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -233,8 +233,6 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
 			*(u16 *)(loc + 2) = (u16)((lower & 0xd000) |
 						  (j1 << 13) | (j2 << 11) |
 						  ((offset >> 1) & 0x07ff));
-			upper = *(u16 *)loc;
-			lower = *(u16 *)(loc + 2);
 			break;
 
 		case R_ARM_THM_MOVW_ABS_NC:


* [PATCH v2 8/8] ARM: Do not compile the Thumb-2 module relocations on an ARM kernel
From: Catalin Marinas @ 2010-06-02 17:02 UTC
  To: linux-arm-kernel

The Thumb-2 specific relocations (R_ARM_THM_CALL, R_ARM_THM_JUMP24,
R_ARM_THM_MOVW_ABS_NC, R_ARM_THM_MOVT_ABS) and the local variables
used to process them are only needed when the kernel itself is
compiled for Thumb-2, so compile this code conditionally on
CONFIG_THUMB2_KERNEL.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
---
 arch/arm/kernel/module.c |    4 ++++
 1 files changed, 4 insertions(+), 0 deletions(-)

diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index aab7fca..6b46058 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -102,7 +102,9 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
 		unsigned long loc;
 		Elf32_Sym *sym;
 		s32 offset;
+#ifdef CONFIG_THUMB2_KERNEL
 		u32 upper, lower, sign, j1, j2;
+#endif
 
 		offset = ELF32_R_SYM(rel->r_info);
 		if (offset < 0 || offset > (symsec->sh_size / sizeof(Elf32_Sym))) {
@@ -185,6 +187,7 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
 					(offset & 0x0fff);
 			break;
 
+#ifdef CONFIG_THUMB2_KERNEL
 		case R_ARM_THM_CALL:
 		case R_ARM_THM_JUMP24:
 			upper = *(u16 *)loc;
@@ -266,6 +269,7 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
 						  ((offset & 0x0700) << 4) |
 						  (offset & 0x00ff));
 			break;
+#endif
 
 		default:
 			printk(KERN_ERR "%s: unknown relocation: %u\n",

