From: Thomas Gleixner <tglx@linutronix.de>
To: Al Viro <viro@zeniv.linux.org.uk>, linux-arch@vger.kernel.org
Cc: gus Gusenleitner Klaus <gus@keba.com>,
Al Viro <viro@ftp.linux.org.uk>,
lkml <linux-kernel@vger.kernel.org>,
Ingo Molnar <mingo@redhat.com>, "bp@alien8.de" <bp@alien8.de>,
"dave.hansen@linux.intel.com" <dave.hansen@linux.intel.com>,
"x86@kernel.org" <x86@kernel.org>,
"David S. Miller" <davem@davemloft.net>,
"dsahern@kernel.org" <dsahern@kernel.org>,
"kuba@kernel.org" <kuba@kernel.org>,
Paolo Abeni <pabeni@redhat.com>,
Eric Dumazet <edumazet@google.com>
Subject: Re: [RFC][PATCH] fix csum_and_copy_..._user() idiocy. Re: AW: [PATCH] amd64: Fix csum_partial_copy_generic()
Date: Mon, 23 Oct 2023 12:37:58 +0200 [thread overview]
Message-ID: <87wmvdd3p5.ffs@tglx> (raw)
In-Reply-To: <20231022194618.GC800259@ZenIV>
On Sun, Oct 22 2023 at 20:46, Al Viro wrote:
> @@ -113,14 +113,14 @@ csum_partial_cfu_aligned(const unsigned long __user *src, unsigned long *dst,
> *dst = word | tmp;
> checksum += carry;
> }
> - return checksum;
> + return from64to16 (checksum);
from64to16(checksum); all over the place
>
> #define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
> #define _HAVE_ARCH_CSUM_AND_COPY
> static inline
> -__wsum csum_and_copy_from_user(const void __user *src, void *dst, int len)
> +__wsum_fault csum_and_copy_from_user(const void __user *src, void *dst, int len)
> {
> if (!access_ok(src, len))
> - return 0;
> + return -1;
return CSUM_FAULT;
> /*
> diff --git a/arch/arm/lib/csumpartialcopygeneric.S b/arch/arm/lib/csumpartialcopygeneric.S
> index 0fd5c10e90a7..5db935eaf165 100644
> --- a/arch/arm/lib/csumpartialcopygeneric.S
> +++ b/arch/arm/lib/csumpartialcopygeneric.S
> @@ -86,7 +86,7 @@ sum .req r3
>
> FN_ENTRY
> save_regs
> - mov sum, #-1
> + mov sum, #0
>
> cmp len, #8 @ Ensure that we have at least
> blo .Lless8 @ 8 bytes to copy.
> @@ -160,6 +160,7 @@ FN_ENTRY
> ldr sum, [sp, #0] @ dst
> tst sum, #1
> movne r0, r0, ror #8
> + mov r1, #0
> load_regs
>
> .Lsrc_not_aligned:
> diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
> index 6928781e6bee..f273c9667914 100644
> --- a/arch/arm/lib/csumpartialcopyuser.S
> +++ b/arch/arm/lib/csumpartialcopyuser.S
> @@ -73,11 +73,11 @@
> #include "csumpartialcopygeneric.S"
>
> /*
> - * We report fault by returning 0 csum - impossible in normal case, since
> - * we start with 0xffffffff for initial sum.
> + * We report fault by returning ~0ULL csum
> */
There is also a stale comment a few lines further up.
> .pushsection .text.fixup,"ax"
> .align 4
> -9001: mov r0, #0
> +9001: mov r0, #-1
> + mov r1, #-1
> load_regs
> .popsection
> #include <linux/errno.h>
> -#include <asm/types.h>
> +#include <linux/bitops.h>
> #include <asm/byteorder.h>
> +
> +typedef u64 __bitwise __wsum_fault;
newline please.
> +static inline __wsum_fault to_wsum_fault(__wsum v)
> +{
> +#if defined(CONFIG_64BIT) || defined(__LITTLE_ENDIAN__)
> + return (__force __wsum_fault)v;
> +#else
> + return (__force __wsum_fault)((__force u64)v << 32);
> +#endif
> +}
> +
> +static inline __wsum_fault from_wsum_fault(__wsum v)
> +{
> +#if defined(CONFIG_64BIT) || defined(__LITTLE_ENDIAN__)
> + return (__force __wsum)v;
> +#else
> + return (__force __wsum)((__force u64)v >> 32);
> +#endif
> +}
> +
> +static inline bool wsum_fault_check(__wsum_fault v)
> +{
> +#if defined(CONFIG_64BIT) || defined(__LITTLE_ENDIAN__)
> + return (__force s64)v < 0;
> +#else
> + return (int)(__force u32)v < 0;
Why not __force s32 right away?
> #include <asm/checksum.h>
> #if !defined(_HAVE_ARCH_COPY_AND_CSUM_FROM_USER) || !defined(HAVE_CSUM_COPY_USER)
> #include <linux/uaccess.h>
> @@ -25,24 +57,24 @@
>
> #ifndef _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
> static __always_inline
> -__wsum csum_and_copy_from_user (const void __user *src, void *dst,
> +__wsum_fault csum_and_copy_from_user (const void __user *src, void *dst,
> int len)
> {
> if (copy_from_user(dst, src, len))
> - return 0;
> - return csum_partial(dst, len, ~0U);
> + return CSUM_FAULT;
> + return to_wsum_fault(csum_partial(dst, len, 0));
> }
> #endif
> #ifndef HAVE_CSUM_COPY_USER
> -static __always_inline __wsum csum_and_copy_to_user
> +static __always_inline __wsum_fault csum_and_copy_to_user
> (const void *src, void __user *dst, int len)
> {
> - __wsum sum = csum_partial(src, len, ~0U);
> + __wsum sum = csum_partial(src, len, 0);
>
> if (copy_to_user(dst, src, len) == 0)
> - return sum;
> - return 0;
> + return to_wsum_fault(sum);
> + return CSUM_FAULT;
if (copy_to_user(dst, src, len))
return CSUM_FAULT;
return to_wsum_fault(sum);
So it follows the pattern of csum_and_copy_from_user() above?
> size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
> struct iov_iter *i)
> {
> - __wsum sum, next;
> + __wsum sum;
> + __wsum_fault next;
> sum = *csum;
> if (WARN_ON_ONCE(!i->data_source))
> return 0;
>
> iterate_and_advance(i, bytes, base, len, off, ({
> next = csum_and_copy_from_user(base, addr + off, len);
> - sum = csum_block_add(sum, next, off);
> - next ? 0 : len;
> + sum = csum_block_add(sum, from_wsum_fault(next), off);
> + likely(!wsum_fault_check(next)) ? 0 : len;
This macro maze is confusing as hell.
Looking at iterate_buf() which is the least convoluted. That resolves to
the following:
len = bytes;
...
next = csum_and_copy_from_user(...);
...
len -= !wsum_fault_check(next) ? 0 : len;
...
bytes = len;
...
return bytes;
So it correctly returns 'bytes' for the success case and 0 for the fault
case.
Now decrypting iterate_iovec():
off = 0;
do {
....
len -= !wsum_fault_check(next) ? 0 : len;
off += len;
skip += len;
bytes -= len;
if (skip < __p->iov_len) <- breaks on fault
break;
...
} while(bytes);
bytes = off;
...
return bytes;
Which means that if the first vector is successfully copied, then 'off'
is greater than 0. A fault on the second one will correctly break out of
the loop, but the function will incorrectly return a value > 0, i.e. the
length of the first iteration.
As the callers just check for != 0 such a partial copy is considered
success, no?
So instead of
likely(!wsum_fault_check(next)) ? 0 : len;
shouldn't this just do:
if (unlikely(wsum_fault_check(next)))
return 0;
len;
for simplicity's and mental sanity's sake?
Thanks,
tglx
next prev parent reply other threads:[~2023-10-23 10:38 UTC|newest]
Thread overview: 66+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-10-18 6:18 [PATCH] amd64: Fix csum_partial_copy_generic() gus Gusenleitner Klaus
2023-10-18 7:36 ` Eric Dumazet
2023-10-18 15:12 ` Thomas Gleixner
2023-10-18 15:42 ` Al Viro
2023-10-18 17:37 ` Thomas Gleixner
2023-10-19 4:44 ` AW: " gus Gusenleitner Klaus
2023-10-19 5:02 ` Al Viro
2023-10-19 6:14 ` Al Viro
2023-10-19 6:39 ` Al Viro
2023-10-19 7:39 ` Eric Dumazet
2023-10-19 8:06 ` Al Viro
2023-10-20 8:32 ` Vincent Whitchurch
2023-10-20 20:27 ` David Laight
2023-10-21 7:15 ` Al Viro
2023-10-21 22:22 ` Al Viro
2023-10-22 11:03 ` David Laight
2023-10-22 11:11 ` Al Viro
2023-10-23 8:16 ` David Laight
2023-10-24 2:59 ` Al Viro
2023-10-22 19:40 ` [RFC][PATCH] fix csum_and_copy_..._user() idiocy. " Al Viro
2023-10-22 19:46 ` Al Viro
2023-10-23 10:37 ` Thomas Gleixner [this message]
2023-10-24 4:26 ` Al Viro
2023-10-24 12:31 ` Thomas Gleixner
2023-10-23 14:44 ` David Laight
2023-10-24 3:53 ` Al Viro
2023-12-05 2:21 ` [RFC][PATCHES v2] checksum stuff Al Viro
2023-12-05 2:23 ` [PATCH v2 01/18] make net/checksum.h self-contained Al Viro
2023-12-05 2:23 ` [PATCH v2 1/9] reiserfs: Avoid touching renamed directory if parent does not change Al Viro
2023-12-05 2:23 ` [PATCH v2 02/18] get rid of asm/checksum.h includes outside of include/net/checksum.h and arch Al Viro
2023-12-05 2:23 ` [PATCH v2 2/9] ocfs2: Avoid touching renamed directory if parent does not change Al Viro
2023-12-05 2:23 ` [PATCH v2 03/18] make net/checksum.h the sole user of asm/checksum.h Al Viro
2023-12-05 2:23 ` [PATCH v2 3/9] udf_rename(): only access the child content on cross-directory rename Al Viro
2023-12-05 2:23 ` [PATCH v2 4/9] ext2: Avoid reading renamed directory if parent does not change Al Viro
2023-12-05 2:23 ` [PATCH v2 04/18] Fix the csum_and_copy_..._user() idiocy Al Viro
2023-12-05 2:24 ` [PATCH v2 05/18] bits missing from csum_and_copy_{from,to}_user() unexporting Al Viro
2023-12-05 2:24 ` [PATCH v2 5/9] ext4: don't access the source subdirectory content on same-directory rename Al Viro
2023-12-05 2:24 ` [PATCH v2 06/18] consolidate csum_tcpudp_magic(), take default variant into net/checksum.h Al Viro
2023-12-05 2:24 ` [PATCH v2 6/9] f2fs: Avoid reading renamed directory if parent does not change Al Viro
2023-12-05 2:24 ` [PATCH v2 07/18] consolidate default ip_compute_csum() Al Viro
2023-12-05 2:24 ` [PATCH v2 7/9] rename(): fix the locking of subdirectories Al Viro
2023-12-05 2:24 ` [PATCH v2 08/18] alpha: pull asm-generic/checksum.h Al Viro
2023-12-05 2:24 ` [PATCH v2 8/9] kill lock_two_inodes() Al Viro
2023-12-05 2:24 ` [PATCH v2 09/18] mips: pull include of asm-generic/checksum.h out of #if Al Viro
2023-12-05 2:24 ` [PATCH v2 9/9] rename(): avoid a deadlock in the case of parents having no common ancestor Al Viro
2023-12-05 2:24 ` [PATCH v2 10/18] nios2: pull asm-generic/checksum.h Al Viro
2023-12-05 2:24 ` [PATCH v2 11/18] x86: merge csum_fold() for 32bit and 64bit Al Viro
2023-12-05 2:24 ` [PATCH v2 12/18] x86: merge ip_fast_csum() " Al Viro
2023-12-05 2:24 ` [PATCH v2 13/18] x86: merge csum_tcpudp_nofold() " Al Viro
2023-12-05 2:24 ` [PATCH v2 14/18] amd64: saner handling of odd address in csum_partial() Al Viro
2023-12-05 2:24 ` [PATCH v2 15/18] x86: optimized csum_add() is the same for 32bit and 64bit Al Viro
2023-12-05 2:24 ` [PATCH v2 16/18] x86: lift the extern for csum_partial() into checksum.h Al Viro
2023-12-05 2:24 ` [PATCH v2 17/18] x86_64: move csum_ipv6_magic() from csum-wrappers_64.c to csum-partial_64.c Al Viro
2023-12-05 2:24 ` [PATCH v2 18/18] uml/x86: use normal x86 checksum.h Al Viro
2024-01-03 22:02 ` Richard Weinberger
2023-12-05 2:27 ` [RFC][PATCHES v2] checksum stuff Al Viro
2023-12-06 11:10 ` David Laight
2023-12-06 22:43 ` Al Viro
2023-12-07 9:58 ` David Laight
2023-12-08 12:04 ` David Laight
2023-12-08 14:17 ` Al Viro
2023-12-08 15:29 ` Al Viro
2023-12-08 15:56 ` David Laight
2023-12-08 18:35 ` Al Viro
2023-10-19 11:45 ` AW: [PATCH] amd64: Fix csum_partial_copy_generic() Thomas Gleixner
2023-10-19 10:33 ` David Laight
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=87wmvdd3p5.ffs@tglx \
--to=tglx@linutronix.de \
--cc=bp@alien8.de \
--cc=dave.hansen@linux.intel.com \
--cc=davem@davemloft.net \
--cc=dsahern@kernel.org \
--cc=edumazet@google.com \
--cc=gus@keba.com \
--cc=kuba@kernel.org \
--cc=linux-arch@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=mingo@redhat.com \
--cc=pabeni@redhat.com \
--cc=viro@ftp.linux.org.uk \
--cc=viro@zeniv.linux.org.uk \
--cc=x86@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox