* [PATCH] arm64: add missing conversion to __wsum in ip_fast_csum()
@ 2017-06-28 14:58 Luc Van Oostenryck
2017-06-29 10:07 ` Will Deacon
0 siblings, 1 reply; 3+ messages in thread
From: Luc Van Oostenryck @ 2017-06-28 14:58 UTC (permalink / raw)
To: linux-arm-kernel
The ARM64 implementation of ip_fast_csum() does most of the work
in 128 or 64 bits and calls csum_fold() to finalize. csum_fold()
itself takes a __wsum argument, to ensure that this value is
always a 32-bit native-order value.
Fix this by using a helper __csum_fold() taking the native
32-bit value and doing the needed folding to 16 bits (and reusing
this helper for csum_fold() itself).
Note: a simpler patch would be to use something like:
return csum_fold((__wsum __force)(sum >> 32));
but using a helper __csum_fold() allows us to avoid
a forced cast.
Signed-off-by: Luc Van Oostenryck <luc.vanoostenryck@gmail.com>
---
arch/arm64/include/asm/checksum.h | 10 +++++++---
1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/arch/arm64/include/asm/checksum.h b/arch/arm64/include/asm/checksum.h
index 09f65339d..dcc655137 100644
--- a/arch/arm64/include/asm/checksum.h
+++ b/arch/arm64/include/asm/checksum.h
@@ -18,12 +18,16 @@
#include <linux/types.h>
-static inline __sum16 csum_fold(__wsum csum)
+static inline __sum16 __csum_fold(u32 sum)
{
- u32 sum = (__force u32)csum;
sum += (sum >> 16) | (sum << 16);
return ~(__force __sum16)(sum >> 16);
}
+
+static inline __sum16 csum_fold(__wsum csum)
+{
+ return __csum_fold((__force u32)csum);
+}
#define csum_fold csum_fold
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
@@ -42,7 +46,7 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
} while (--ihl);
sum += ((sum >> 32) | (sum << 32));
- return csum_fold(sum >> 32);
+ return __csum_fold(sum >> 32);
}
#define ip_fast_csum ip_fast_csum
--
2.13.0
^ permalink raw reply related [flat|nested] 3+ messages in thread
* [PATCH] arm64: add missing conversion to __wsum in ip_fast_csum()
2017-06-28 14:58 [PATCH] arm64: add missing conversion to __wsum in ip_fast_csum() Luc Van Oostenryck
@ 2017-06-29 10:07 ` Will Deacon
2017-06-29 14:31 ` [PATCH v2] " Luc Van Oostenryck
0 siblings, 1 reply; 3+ messages in thread
From: Will Deacon @ 2017-06-29 10:07 UTC (permalink / raw)
To: linux-arm-kernel
On Wed, Jun 28, 2017 at 04:58:14PM +0200, Luc Van Oostenryck wrote:
> The ARM64 implementation of ip_fast_csum() does most of the work
> in 128 or 64 bits and calls csum_fold() to finalize. csum_fold()
> itself takes a __wsum argument, to ensure that this value is
> always a 32-bit native-order value.
>
> Fix this by using a helper __csum_fold() taking the native
> 32-bit value and doing the needed folding to 16 bits (and reusing
> this helper for csum_fold() itself).
>
> Note: a simpler patch would be to use something like:
> return csum_fold((__wsum __force)(sum >> 32));
> but using a helper __csum_fold() allows us to avoid
> a forced cast.
But you've added a __force cast in csum_fold, and we still have the one in
the return statement of __csum_fold, so I think I prefer the simpler patch.
Will
>
> Signed-off-by: Luc Van Oostenryck <luc.vanoostenryck@gmail.com>
> ---
> arch/arm64/include/asm/checksum.h | 10 +++++++---
> 1 file changed, 7 insertions(+), 3 deletions(-)
>
> diff --git a/arch/arm64/include/asm/checksum.h b/arch/arm64/include/asm/checksum.h
> index 09f65339d..dcc655137 100644
> --- a/arch/arm64/include/asm/checksum.h
> +++ b/arch/arm64/include/asm/checksum.h
> @@ -18,12 +18,16 @@
>
> #include <linux/types.h>
>
> -static inline __sum16 csum_fold(__wsum csum)
> +static inline __sum16 __csum_fold(u32 sum)
> {
> - u32 sum = (__force u32)csum;
> sum += (sum >> 16) | (sum << 16);
> return ~(__force __sum16)(sum >> 16);
> }
> +
> +static inline __sum16 csum_fold(__wsum csum)
> +{
> + return __csum_fold((__force u32)csum);
> +}
> #define csum_fold csum_fold
>
> static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
> @@ -42,7 +46,7 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
> } while (--ihl);
>
> sum += ((sum >> 32) | (sum << 32));
> - return csum_fold(sum >> 32);
> + return __csum_fold(sum >> 32);
> }
> #define ip_fast_csum ip_fast_csum
>
> --
> 2.13.0
>
^ permalink raw reply [flat|nested] 3+ messages in thread
* [PATCH v2] arm64: add missing conversion to __wsum in ip_fast_csum()
2017-06-29 10:07 ` Will Deacon
@ 2017-06-29 14:31 ` Luc Van Oostenryck
0 siblings, 0 replies; 3+ messages in thread
From: Luc Van Oostenryck @ 2017-06-29 14:31 UTC (permalink / raw)
To: linux-arm-kernel
The ARM64 implementation of ip_fast_csum() does most of the work
in 128 or 64 bits and calls csum_fold() to finalize. csum_fold()
itself takes a __wsum argument, to ensure that this value is
always a 32-bit native-order value.
Fix this by adding the sadly needed '__force' to cast the native
'sum' to the type '__wsum'.
Signed-off-by: Luc Van Oostenryck <luc.vanoostenryck@gmail.com>
---
Change since v1:
- use the simple __force cast instead of an intermediate helper.
---
arch/arm64/include/asm/checksum.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/arm64/include/asm/checksum.h b/arch/arm64/include/asm/checksum.h
index 09f65339d..0b6f5a7d4 100644
--- a/arch/arm64/include/asm/checksum.h
+++ b/arch/arm64/include/asm/checksum.h
@@ -42,7 +42,7 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
} while (--ihl);
sum += ((sum >> 32) | (sum << 32));
- return csum_fold(sum >> 32);
+ return csum_fold((__force u32)(sum >> 32));
}
#define ip_fast_csum ip_fast_csum
--
2.13.0
^ permalink raw reply related [flat|nested] 3+ messages in thread
end of thread, other threads:[~2017-06-29 14:31 UTC | newest]
Thread overview: 3+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2017-06-28 14:58 [PATCH] arm64: add missing conversion to __wsum in ip_fast_csum() Luc Van Oostenryck
2017-06-29 10:07 ` Will Deacon
2017-06-29 14:31 ` [PATCH v2] " Luc Van Oostenryck
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).