* [PATCH] m68k: merge the mmu and non-mmu versions of checksum.h
@ 2009-06-17  7:11 Greg Ungerer
From: Greg Ungerer @ 2009-06-17  7:11 UTC
  To: linux-kernel; +Cc: gerg, linux-m68k

[PATCH] m68k: merge the mmu and non-mmu versions of checksum.h

There is only minimal difference between the mmu and non-mmu versions
of checksum.h; most of their contents are shared. Merge the two
separate files back into a single checksum.h.

Signed-off-by: Greg Ungerer <gerg@uclinux.org>
---
 .../m68k/include/asm/{checksum_mm.h => checksum.h} |   11 ++
 arch/m68k/include/asm/checksum_no.h                |  132 --------------------
 arch/m68knommu/lib/checksum.c                      |    9 --
 3 files changed, 11 insertions(+), 141 deletions(-)
 rename arch/m68k/include/asm/{checksum_mm.h => checksum.h} (93%)
 delete mode 100644 arch/m68k/include/asm/checksum_no.h

diff --git a/arch/m68k/include/asm/checksum_mm.h b/arch/m68k/include/asm/checksum.h
similarity index 93%
rename from arch/m68k/include/asm/checksum_mm.h
rename to arch/m68k/include/asm/checksum.h
index 494f9ae..87cabc3 100644
--- a/arch/m68k/include/asm/checksum_mm.h
+++ b/arch/m68k/include/asm/checksum.h
@@ -34,6 +34,8 @@ extern __wsum csum_partial_copy_nocheck(const void *src,
 					      void *dst, int len,
 					      __wsum sum);
 
+
+#ifdef CONFIG_MMU
 /*
  *	This is a version of ip_compute_csum() optimized for IP headers,
  *	which always checksum on 4 octet boundaries.
@@ -59,6 +61,9 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
 		 : "memory");
 	return (__force __sum16)~sum;
 }
+#else
+__sum16 ip_fast_csum(const void *iph, unsigned int ihl);
+#endif
 
 /*
  *	Fold a partial checksum
@@ -67,6 +72,11 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
 static inline __sum16 csum_fold(__wsum sum)
 {
 	unsigned int tmp = (__force u32)sum;
+#ifdef CONFIG_COLDFIRE
+	tmp = (tmp & 0xffff) + (tmp >> 16);
+	tmp = (tmp & 0xffff) + (tmp >> 16);
+	return (__force __sum16)~tmp;
+#else
 	__asm__("swap %1\n\t"
 		"addw %1, %0\n\t"
 		"clrw %1\n\t"
@@ -74,6 +84,7 @@ static inline __sum16 csum_fold(__wsum sum)
 		: "=&d" (sum), "=&d" (tmp)
 		: "0" (sum), "1" (tmp));
 	return (__force __sum16)~sum;
+#endif
 }
 
 
diff --git a/arch/m68k/include/asm/checksum_no.h b/arch/m68k/include/asm/checksum_no.h
deleted file mode 100644
index 8188348..0000000
--- a/arch/m68k/include/asm/checksum_no.h
+++ /dev/null
@@ -1,132 +0,0 @@
-#ifndef _M68K_CHECKSUM_H
-#define _M68K_CHECKSUM_H
-
-#include <linux/in6.h>
-
-/*
- * computes the checksum of a memory block at buff, length len,
- * and adds in "sum" (32-bit)
- *
- * returns a 32-bit number suitable for feeding into itself
- * or csum_tcpudp_magic
- *
- * this function must be called with even lengths, except
- * for the last fragment, which may be odd
- *
- * it's best to have buff aligned on a 32-bit boundary
- */
-__wsum csum_partial(const void *buff, int len, __wsum sum);
-
-/*
- * the same as csum_partial, but copies from src while it
- * checksums
- *
- * here even more important to align src and dst on a 32-bit (or even
- * better 64-bit) boundary
- */
-
-__wsum csum_partial_copy_nocheck(const void *src, void *dst,
-	int len, __wsum sum);
-
-
-/*
- * the same as csum_partial_copy, but copies from user space.
- *
- * here even more important to align src and dst on a 32-bit (or even
- * better 64-bit) boundary
- */
-
-extern __wsum csum_partial_copy_from_user(const void __user *src,
-	void *dst, int len, __wsum sum, int *csum_err);
-
-__sum16 ip_fast_csum(const void *iph, unsigned int ihl);
-
-/*
- *	Fold a partial checksum
- */
-
-static inline __sum16 csum_fold(__wsum sum)
-{
-	unsigned int tmp = (__force u32)sum;
-#ifdef CONFIG_COLDFIRE
-	tmp = (tmp & 0xffff) + (tmp >> 16);
-	tmp = (tmp & 0xffff) + (tmp >> 16);
-	return (__force __sum16)~tmp;
-#else
-	__asm__("swap %1\n\t"
-		"addw %1, %0\n\t"
-		"clrw %1\n\t"
-		"addxw %1, %0"
-		: "=&d" (sum), "=&d" (tmp)
-		: "0" (sum), "1" (sum));
-	return (__force __sum16)~sum;
-#endif
-}
-
-
-/*
- * computes the checksum of the TCP/UDP pseudo-header
- * returns a 16-bit checksum, already complemented
- */
-
-static inline __wsum
-csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
-		  unsigned short proto, __wsum sum)
-{
-	__asm__ ("addl  %1,%0\n\t"
-		 "addxl %4,%0\n\t"
-		 "addxl %5,%0\n\t"
-		 "clrl %1\n\t"
-		 "addxl %1,%0"
-		 : "=&d" (sum), "=&d" (saddr)
-		 : "0" (daddr), "1" (saddr), "d" (len + proto),
-		   "d"(sum));
-	return sum;
-}
-
-static inline __sum16
-csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len,
-		  unsigned short proto, __wsum sum)
-{
-	return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
-}
-
-/*
- * this routine is used for miscellaneous IP-like checksums, mainly
- * in icmp.c
- */
-
-extern __sum16 ip_compute_csum(const void *buff, int len);
-
-#define _HAVE_ARCH_IPV6_CSUM
-static __inline__ __sum16
-csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr,
-		__u32 len, unsigned short proto, __wsum sum)
-{
-	register unsigned long tmp;
-	__asm__("addl %2@,%0\n\t"
-		"movel %2@(4),%1\n\t"
-		"addxl %1,%0\n\t"
-		"movel %2@(8),%1\n\t"
-		"addxl %1,%0\n\t"
-		"movel %2@(12),%1\n\t"
-		"addxl %1,%0\n\t"
-		"movel %3@,%1\n\t"
-		"addxl %1,%0\n\t"
-		"movel %3@(4),%1\n\t"
-		"addxl %1,%0\n\t"
-		"movel %3@(8),%1\n\t"
-		"addxl %1,%0\n\t"
-		"movel %3@(12),%1\n\t"
-		"addxl %1,%0\n\t"
-		"addxl %4,%0\n\t"
-		"clrl %1\n\t"
-		"addxl %1,%0"
-		: "=&d" (sum), "=&d" (tmp)
-		: "a" (saddr), "a" (daddr), "d" (len + proto),
-		  "0" (sum));
-
-	return csum_fold(sum);
-}
-
-#endif /* _M68K_CHECKSUM_H */
diff --git a/arch/m68knommu/lib/checksum.c b/arch/m68knommu/lib/checksum.c
index 269d83b..8bf012a 100644
--- a/arch/m68knommu/lib/checksum.c
+++ b/arch/m68knommu/lib/checksum.c
@@ -127,15 +127,6 @@ __wsum csum_partial(const void *buff, int len, __wsum sum)
 EXPORT_SYMBOL(csum_partial);
 
 /*
- * this routine is used for miscellaneous IP-like checksums, mainly
- * in icmp.c
- */
-__sum16 ip_compute_csum(const void *buff, int len)
-{
-	return (__force __sum16)~do_csum(buff,len);
-}
-
-/*
  * copy from fs while checksumming, otherwise like csum_partial
  */
 


* Re: [PATCH] m68k: merge the mmu and non-mmu versions of checksum.h
From: Christoph Hellwig @ 2009-06-18 19:45 UTC
  To: Greg Ungerer; +Cc: linux-kernel, gerg, linux-m68k

On Wed, Jun 17, 2009 at 05:11:15PM +1000, Greg Ungerer wrote:
> +#ifdef CONFIG_MMU
>  /*
>   *	This is a version of ip_compute_csum() optimized for IP headers,
>   *	which always checksum on 4 octet boundaries.
> @@ -59,6 +61,9 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
>  		 : "memory");
>  	return (__force __sum16)~sum;
>  }
> +#else
> +__sum16 ip_fast_csum(const void *iph, unsigned int ihl);
> +#endif

Any good reason this is inline for all mmu processors and out of line
for nommu, independent of the actual cpu variant?

>  static inline __sum16 csum_fold(__wsum sum)
>  {
>  	unsigned int tmp = (__force u32)sum;
> +#ifdef CONFIG_COLDFIRE
> +	tmp = (tmp & 0xffff) + (tmp >> 16);
> +	tmp = (tmp & 0xffff) + (tmp >> 16);
> +	return (__force __sum16)~tmp;
> +#else
>  	__asm__("swap %1\n\t"
>  		"addw %1, %0\n\t"
>  		"clrw %1\n\t"
> @@ -74,6 +84,7 @@ static inline __sum16 csum_fold(__wsum sum)
>  		: "=&d" (sum), "=&d" (tmp)
>  		: "0" (sum), "1" (tmp));
>  	return (__force __sum16)~sum;
> +#endif
>  }

I think this would be cleaner by having totally separate functions
for both cases, e.g.

#ifdef CONFIG_COLDFIRE
static inline __sum16 csum_fold(__wsum sum)
{
	unsigned int tmp = (__force u32)sum;

	tmp = (tmp & 0xffff) + (tmp >> 16);
	tmp = (tmp & 0xffff) + (tmp >> 16);

	return (__force __sum16)~tmp;
}
#else
...
#endif
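
For the elided #else arm, the existing asm version from the hunk above
would simply move into its own full copy of the function; a sketch
reassembled from that hunk:

#else
static inline __sum16 csum_fold(__wsum sum)
{
	unsigned int tmp = (__force u32)sum;

	__asm__("swap %1\n\t"
		"addw %1, %0\n\t"
		"clrw %1\n\t"
		"addxw %1, %0"
		: "=&d" (sum), "=&d" (tmp)
		: "0" (sum), "1" (tmp));

	return (__force __sum16)~sum;
}
#endif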


* Re: [PATCH] m68k: merge the mmu and non-mmu versions of checksum.h
From: Greg Ungerer @ 2009-06-19  6:54 UTC
  To: Christoph Hellwig; +Cc: linux-kernel, gerg, linux-m68k

Hi Christoph,

Christoph Hellwig wrote:
> On Wed, Jun 17, 2009 at 05:11:15PM +1000, Greg Ungerer wrote:
>> +#ifdef CONFIG_MMU
>>  /*
>>   *	This is a version of ip_compute_csum() optimized for IP headers,
>>   *	which always checksum on 4 octet boundaries.
>> @@ -59,6 +61,9 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
>>  		 : "memory");
>>  	return (__force __sum16)~sum;
>>  }
>> +#else
>> +__sum16 ip_fast_csum(const void *iph, unsigned int ihl);
>> +#endif
> 
> Any good reason this is inline for all mmu processors and out of line
> for nommu, independent of the actual cpu variant?

I don't recall whether the simple (and thus non-mmu) m68k variants
support all the instructions used in this optimized version.
I will check that. It might be that this conditional is misplaced
and should actually depend on the CPU type.
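
If so, the split would key on the CPU family rather than CONFIG_MMU;
a hypothetical sketch, pending that check:

#ifdef CONFIG_COLDFIRE	/* or whichever variants lack the needed instructions */
/* simple cores: keep the C version out of line */
__sum16 ip_fast_csum(const void *iph, unsigned int ihl);
#else
/* full 68k cores: the existing inline asm version */
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
	...
}
#endif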

The C code version is significantly bigger; I think that is why
it was not inlined here (see arch/m68knommu/lib/checksum.c).
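
For reference, the entry point in that file is only a thin wrapper; the
bulk is the do_csum() loop it calls, which is what would have to be
inlined. Roughly (a sketch, assuming the usual do_csum() helper there):

__sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
	/* ihl counts 32-bit words, so the header is ihl * 4 bytes */
	return (__force __sum16)~do_csum(iph, ihl * 4);
}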


>>  static inline __sum16 csum_fold(__wsum sum)
>>  {
>>  	unsigned int tmp = (__force u32)sum;
>> +#ifdef CONFIG_COLDFIRE
>> +	tmp = (tmp & 0xffff) + (tmp >> 16);
>> +	tmp = (tmp & 0xffff) + (tmp >> 16);
>> +	return (__force __sum16)~tmp;
>> +#else
>>  	__asm__("swap %1\n\t"
>>  		"addw %1, %0\n\t"
>>  		"clrw %1\n\t"
>> @@ -74,6 +84,7 @@ static inline __sum16 csum_fold(__wsum sum)
>>  		: "=&d" (sum), "=&d" (tmp)
>>  		: "0" (sum), "1" (tmp));
>>  	return (__force __sum16)~sum;
>> +#endif
>>  }
> 
> I think this would be cleaner by having totally separate functions
> for both cases, e.g.
> 
> #ifdef CONFIG_COLDFIRE
> static inline __sum16 csum_fold(__wsum sum)
> {
> 	unsigned int tmp = (__force u32)sum;
> 
> 	tmp = (tmp & 0xffff) + (tmp >> 16);
> 	tmp = (tmp & 0xffff) + (tmp >> 16);
> 
> 	return (__force __sum16)~tmp;
> }
> #else
> ...
> #endif

Ok, I will change that.

Thanks
Greg


------------------------------------------------------------------------
Greg Ungerer  --  Principal Engineer        EMAIL:     gerg@snapgear.com
SnapGear Group, McAfee                      PHONE:       +61 7 3435 2888
825 Stanley St,                             FAX:         +61 7 3891 3630
Woolloongabba, QLD, 4102, Australia         WEB: http://www.SnapGear.com

