linux-kernel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH] x86: Improve 64 bit __phys_addr call performance
@ 2012-10-09 18:50 Alexander Duyck
  2012-10-10 13:58 ` Andi Kleen
  2012-10-24 10:25 ` Ingo Molnar
  0 siblings, 2 replies; 5+ messages in thread
From: Alexander Duyck @ 2012-10-09 18:50 UTC (permalink / raw)
  To: tglx, mingo, hpa, andi; +Cc: linux-kernel, x86

This patch is meant to improve overall system performance when making use of
the __phys_addr call on 64 bit x86 systems.  To do this I have implemented
several changes.

First if CONFIG_DEBUG_VIRTUAL is not defined __phys_addr is made an inline,
similar to how this is currently handled in 32 bit.  However in order to do
this it is required to export phys_base so that it is available if __phys_addr
is used in kernel modules.

The second change was to streamline the code by making use of the carry flag
on an add operation instead of performing a compare on a 64 bit value.  The
advantage to this is that it allows us to reduce the overall size of the call.
On my Xeon E5 system the entire __phys_addr inline call consumes 30 bytes and
5 instructions.  I also applied similar logic to the debug version of the
function.  My testing shows that the debug version of the function with this
patch applied is slightly faster than the non-debug version without the patch.

Finally, when building the kernel with the first two changes applied I saw
build warnings about __START_KERNEL_map and PAGE_OFFSET constants not fitting
in their type.  In order to resolve the build warning I changed their type
from UL to ULL.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
---

 arch/x86/include/asm/page_64_types.h |   16 ++++++++++++++--
 arch/x86/kernel/x8664_ksyms_64.c     |    3 +++
 arch/x86/mm/physaddr.c               |   20 ++++++++++++++------
 3 files changed, 31 insertions(+), 8 deletions(-)

diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
index 320f7bb..a951e4d 100644
--- a/arch/x86/include/asm/page_64_types.h
+++ b/arch/x86/include/asm/page_64_types.h
@@ -30,14 +30,14 @@
  * hypervisor to fit.  Choosing 16 slots here is arbitrary, but it's
  * what Xen requires.
  */
-#define __PAGE_OFFSET           _AC(0xffff880000000000, UL)
+#define __PAGE_OFFSET           _AC(0xffff880000000000, ULL)
 
 #define __PHYSICAL_START	((CONFIG_PHYSICAL_START +	 	\
 				  (CONFIG_PHYSICAL_ALIGN - 1)) &	\
 				 ~(CONFIG_PHYSICAL_ALIGN - 1))
 
 #define __START_KERNEL		(__START_KERNEL_map + __PHYSICAL_START)
-#define __START_KERNEL_map	_AC(0xffffffff80000000, UL)
+#define __START_KERNEL_map	_AC(0xffffffff80000000, ULL)
 
 /* See Documentation/x86/x86_64/mm.txt for a description of the memory map. */
 #define __PHYSICAL_MASK_SHIFT	46
@@ -58,7 +58,19 @@ void copy_page(void *to, void *from);
 extern unsigned long max_pfn;
 extern unsigned long phys_base;
 
+#ifdef CONFIG_DEBUG_VIRTUAL
 extern unsigned long __phys_addr(unsigned long);
+#else
+static inline unsigned long __phys_addr(unsigned long x)
+{
+	unsigned long y = x - __START_KERNEL_map;
+
+	/* use the carry flag to determine if x was < __START_KERNEL_map */
+	x = y + ((x > y) ? phys_base : (__START_KERNEL_map - PAGE_OFFSET));
+
+	return x;
+}
+#endif
 #define __phys_reloc_hide(x)	(x)
 
 #define vmemmap ((struct page *)VMEMMAP_START)
diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
index 1330dd1..b014d94 100644
--- a/arch/x86/kernel/x8664_ksyms_64.c
+++ b/arch/x86/kernel/x8664_ksyms_64.c
@@ -59,6 +59,9 @@ EXPORT_SYMBOL(memcpy);
 EXPORT_SYMBOL(__memcpy);
 EXPORT_SYMBOL(memmove);
 
+#ifndef CONFIG_DEBUG_VIRTUAL
+EXPORT_SYMBOL(phys_base);
+#endif
 EXPORT_SYMBOL(empty_zero_page);
 #ifndef CONFIG_PARAVIRT
 EXPORT_SYMBOL(native_load_gs_index);
diff --git a/arch/x86/mm/physaddr.c b/arch/x86/mm/physaddr.c
index d2e2735..f63bec5 100644
--- a/arch/x86/mm/physaddr.c
+++ b/arch/x86/mm/physaddr.c
@@ -8,20 +8,28 @@
 
 #ifdef CONFIG_X86_64
 
+#ifdef CONFIG_DEBUG_VIRTUAL
 unsigned long __phys_addr(unsigned long x)
 {
-	if (x >= __START_KERNEL_map) {
-		x -= __START_KERNEL_map;
-		VIRTUAL_BUG_ON(x >= KERNEL_IMAGE_SIZE);
-		x += phys_base;
+	unsigned long y = x - __START_KERNEL_map;
+
+	/* use the carry flag to determine if x was < __START_KERNEL_map */
+	if (unlikely(x > y)) {
+		x = y + phys_base;
+
+		VIRTUAL_BUG_ON(y >= KERNEL_IMAGE_SIZE);
 	} else {
-		VIRTUAL_BUG_ON(x < PAGE_OFFSET);
-		x -= PAGE_OFFSET;
+		x = y + (__START_KERNEL_map - PAGE_OFFSET);
+
+		/* carry flag will be set if starting x was >= PAGE_OFFSET */
+		VIRTUAL_BUG_ON(x > y);
 		VIRTUAL_BUG_ON(!phys_addr_valid(x));
 	}
+
 	return x;
 }
 EXPORT_SYMBOL(__phys_addr);
+#endif
 
 bool __virt_addr_valid(unsigned long x)
 {


^ permalink raw reply related	[flat|nested] 5+ messages in thread

* Re: [PATCH] x86: Improve 64 bit __phys_addr call performance
  2012-10-09 18:50 [PATCH] x86: Improve 64 bit __phys_addr call performance Alexander Duyck
@ 2012-10-10 13:58 ` Andi Kleen
  2012-10-10 23:56   ` Alexander Duyck
  2012-10-24 10:25 ` Ingo Molnar
  1 sibling, 1 reply; 5+ messages in thread
From: Andi Kleen @ 2012-10-10 13:58 UTC (permalink / raw)
  To: Alexander Duyck; +Cc: tglx, mingo, hpa, andi, linux-kernel, x86

> The second change was to streamline the code by making use of the carry flag
> on an add operation instead of performing a compare on a 64 bit value.  The
> advantage to this is that it allows us to reduce the overall size of the call.
> On my Xeon E5 system the entire __phys_addr inline call consumes 30 bytes and
> 5 instructions.  I also applied similar logic to the debug version of the
> function.  My testing shows that the debug version of the function with this
> patch applied is slightly faster than the non-debug version without the patch.

Looks good. Thanks. 

Probably should still split the callers though (or have a pa_symbol_fast
that does not do the check)

-Andi

^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH] x86: Improve 64 bit __phys_addr call performance
  2012-10-10 13:58 ` Andi Kleen
@ 2012-10-10 23:56   ` Alexander Duyck
  0 siblings, 0 replies; 5+ messages in thread
From: Alexander Duyck @ 2012-10-10 23:56 UTC (permalink / raw)
  To: Andi Kleen; +Cc: tglx, mingo, hpa, linux-kernel, x86

On 10/10/2012 06:58 AM, Andi Kleen wrote:
>> The second change was to streamline the code by making use of the carry flag
>> on an add operation instead of performing a compare on a 64 bit value.  The
>> advantage to this is that it allows us to reduce the overall size of the call.
>> On my Xeon E5 system the entire __phys_addr inline call consumes 30 bytes and
>> 5 instructions.  I also applied similar logic to the debug version of the
>> function.  My testing shows that the debug version of the function with this
>> patch applied is slightly faster than the non-debug version without the patch.
> Looks good. Thanks. 
>
> Probably should still split the callers though (or have a pa_symbol_fast
> that does not do the check)
>
> -Andi

I hadn't thought of that.  I couldn't drop support for symbols from
__pa, but I can get away with dropping support for regular addresses
from __pa_symbol.

I just submitted a patch to drop support for standard virtual addresses
from __pa_symbol.  I will also submit some patches tomorrow morning for
cleaning up a number of places I had found where we were calling
__pa/virt_to_phys when we should have been calling __pa_symbol.

Thanks,

Alex

^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH] x86: Improve 64 bit __phys_addr call performance
  2012-10-09 18:50 [PATCH] x86: Improve 64 bit __phys_addr call performance Alexander Duyck
  2012-10-10 13:58 ` Andi Kleen
@ 2012-10-24 10:25 ` Ingo Molnar
  2012-10-24 16:31   ` Alexander Duyck
  1 sibling, 1 reply; 5+ messages in thread
From: Ingo Molnar @ 2012-10-24 10:25 UTC (permalink / raw)
  To: Alexander Duyck; +Cc: tglx, mingo, hpa, andi, linux-kernel, x86


* Alexander Duyck <alexander.h.duyck@intel.com> wrote:

> This patch is meant to improve overall system performance when 
> making use of the __phys_addr call on 64 bit x86 systems.  To 
> do this I have implemented several changes.
> 
> First if CONFIG_DEBUG_VIRTUAL is not defined __phys_addr is 
> made an inline, similar to how this is currently handled in 32 
> bit.  However in order to do this it is required to export 
> phys_base so that it is available if __phys_addr is used in 
> kernel modules.
> 
> The second change was to streamline the code by making use of 
> the carry flag on an add operation instead of performing a 
> compare on a 64 bit value.  The advantage to this is that it 
> allows us to reduce the overall size of the call. On my Xeon 
> E5 system the entire __phys_addr inline call consumes 30 bytes 
> and 5 instructions.  I also applied similar logic to the debug 
> version of the function.  My testing shows that the debug 
> version of the function with this patch applied is slightly 
> faster than the non-debug version without the patch.
> 
> Finally, when building the kernel with the first two changes 
> applied I saw build warnings about __START_KERNEL_map and 
> PAGE_OFFSET constants not fitting in their type.  In order to 
> resolve the build warning I changed their type from UL to ULL.
> 
> Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
> ---
> 
>  arch/x86/include/asm/page_64_types.h |   16 ++++++++++++++--
>  arch/x86/kernel/x8664_ksyms_64.c     |    3 +++
>  arch/x86/mm/physaddr.c               |   20 ++++++++++++++------
>  3 files changed, 31 insertions(+), 8 deletions(-)

> +#ifdef CONFIG_DEBUG_VIRTUAL
>  extern unsigned long __phys_addr(unsigned long);
> +#else
> +static inline unsigned long __phys_addr(unsigned long x)
> +{
> +	unsigned long y = x - __START_KERNEL_map;
> +
> +	/* use the carry flag to determine if x was < __START_KERNEL_map */
> +	x = y + ((x > y) ? phys_base : (__START_KERNEL_map - PAGE_OFFSET));
> +
> +	return x;
> +}

This is a rather frequently used primitive. By how much does 
this patch increase a 'make defconfig' kernel's vmlinux, as 
measured via 'size vmlinux'?

Thanks,

	Ingo

^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: [PATCH] x86: Improve 64 bit __phys_addr call performance
  2012-10-24 10:25 ` Ingo Molnar
@ 2012-10-24 16:31   ` Alexander Duyck
  0 siblings, 0 replies; 5+ messages in thread
From: Alexander Duyck @ 2012-10-24 16:31 UTC (permalink / raw)
  To: Ingo Molnar; +Cc: tglx, mingo, hpa, andi, linux-kernel, x86

On 10/24/2012 03:25 AM, Ingo Molnar wrote:
> * Alexander Duyck <alexander.h.duyck@intel.com> wrote:
>
>> This patch is meant to improve overall system performance when 
>> making use of the __phys_addr call on 64 bit x86 systems.  To 
>> do this I have implemented several changes.
>>
>> First if CONFIG_DEBUG_VIRTUAL is not defined __phys_addr is 
>> made an inline, similar to how this is currently handled in 32 
>> bit.  However in order to do this it is required to export 
>> phys_base so that it is available if __phys_addr is used in 
>> kernel modules.
>>
>> The second change was to streamline the code by making use of 
>> the carry flag on an add operation instead of performing a 
>> compare on a 64 bit value.  The advantage to this is that it 
>> allows us to reduce the overall size of the call. On my Xeon 
>> E5 system the entire __phys_addr inline call consumes 30 bytes 
>> and 5 instructions.  I also applied similar logic to the debug 
>> version of the function.  My testing shows that the debug 
>> version of the function with this patch applied is slightly 
>> faster than the non-debug version without the patch.
>>
>> Finally, when building the kernel with the first two changes 
>> applied I saw build warnings about __START_KERNEL_map and 
>> PAGE_OFFSET constants not fitting in their type.  In order to 
>> resolve the build warning I changed their type from UL to ULL.
>>
>> Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
>> ---
>>
>>  arch/x86/include/asm/page_64_types.h |   16 ++++++++++++++--
>>  arch/x86/kernel/x8664_ksyms_64.c     |    3 +++
>>  arch/x86/mm/physaddr.c               |   20 ++++++++++++++------
>>  3 files changed, 31 insertions(+), 8 deletions(-)
>> +#ifdef CONFIG_DEBUG_VIRTUAL
>>  extern unsigned long __phys_addr(unsigned long);
>> +#else
>> +static inline unsigned long __phys_addr(unsigned long x)
>> +{
>> +	unsigned long y = x - __START_KERNEL_map;
>> +
>> +	/* use the carry flag to determine if x was < __START_KERNEL_map */
>> +	x = y + ((x > y) ? phys_base : (__START_KERNEL_map - PAGE_OFFSET));
>> +
>> +	return x;
>> +}
> This is a rather frequently used primitive. By how much does 
> this patch increase a 'make defconfig' kernel's vmlinux, as 
> measured via 'size vmlinux'?
>
> Thanks,
>
> 	Ingo

Here is the before and after:

Before
    text    data     bss      dec    hex filename
10368528 1047480 1122304 12538312 bf51c8 vmlinux

After
    text    data     bss      dec    hex filename
10372216 1047480 1122304 12542000 bf6030 vmlinux

I also have some patches that are going into the swiotlb.  With them in it
reduces the size a bit but still doesn't get us back to the original size:

After SWIOTLB
    text    data     bss      dec    hex filename
10371860 1047480 1122304 12541644 bf5ecc vmlinux

The total increase in size amounts to about 3.6K without the SWIOTLB
changes, and about 3.3K with.

Thanks,

Alex

^ permalink raw reply	[flat|nested] 5+ messages in thread

end of thread, other threads:[~2012-10-24 16:31 UTC | newest]

Thread overview: 5+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2012-10-09 18:50 [PATCH] x86: Improve 64 bit __phys_addr call performance Alexander Duyck
2012-10-10 13:58 ` Andi Kleen
2012-10-10 23:56   ` Alexander Duyck
2012-10-24 10:25 ` Ingo Molnar
2012-10-24 16:31   ` Alexander Duyck

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).