public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
* [PATCH 1/2] random: convert to using fops->read_iter()
  2022-05-19 19:31 [PATCHSET " Jens Axboe
@ 2022-05-19 19:31 ` Jens Axboe
  2022-05-19 23:12   ` Jason A. Donenfeld
  0 siblings, 1 reply; 14+ messages in thread
From: Jens Axboe @ 2022-05-19 19:31 UTC (permalink / raw)
  To: tytso, Jason; +Cc: hch, linux-kernel, Jens Axboe

This is a pre-requisite to wiring up splice() again for the random
and urandom drivers.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 drivers/char/random.c | 37 +++++++++++++++++++------------------
 1 file changed, 19 insertions(+), 18 deletions(-)

diff --git a/drivers/char/random.c b/drivers/char/random.c
index 4c9adb4f3d5d..529afd31d549 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -528,11 +528,12 @@ void get_random_bytes(void *buf, size_t nbytes)
 }
 EXPORT_SYMBOL(get_random_bytes);
 
-static ssize_t get_random_bytes_user(void __user *buf, size_t nbytes)
+static ssize_t get_random_bytes_user(struct iov_iter *to)
 {
-	size_t len, left, ret = 0;
+	size_t len, ret = 0;
 	u32 chacha_state[CHACHA_STATE_WORDS];
 	u8 output[CHACHA_BLOCK_SIZE];
+	size_t nbytes = iov_iter_count(to);
 
 	if (!nbytes)
 		return 0;
@@ -549,7 +550,7 @@ static ssize_t get_random_bytes_user(void __user *buf, size_t nbytes)
 	 * the user directly.
 	 */
 	if (nbytes <= CHACHA_KEY_SIZE) {
-		ret = nbytes - copy_to_user(buf, &chacha_state[4], nbytes);
+		ret = copy_to_iter(&chacha_state[4], nbytes, to);
 		goto out_zero_chacha;
 	}
 
@@ -559,13 +560,10 @@ static ssize_t get_random_bytes_user(void __user *buf, size_t nbytes)
 			++chacha_state[13];
 
 		len = min_t(size_t, nbytes, CHACHA_BLOCK_SIZE);
-		left = copy_to_user(buf, output, len);
-		if (left) {
-			ret += len - left;
+		len = copy_to_iter(output, len, to);
+		if (!len)
 			break;
-		}
 
-		buf += len;
 		ret += len;
 		nbytes -= len;
 		if (!nbytes)
@@ -1466,6 +1464,9 @@ static void try_to_generate_entropy(void)
 SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count, unsigned int,
 		flags)
 {
+	struct iovec iov = { .iov_base = buf };
+	struct iov_iter iter;
+
 	if (flags & ~(GRND_NONBLOCK | GRND_RANDOM | GRND_INSECURE))
 		return -EINVAL;
 
@@ -1488,7 +1489,9 @@ SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count, unsigned int,
 		if (unlikely(ret))
 			return ret;
 	}
-	return get_random_bytes_user(buf, count);
+	iov.iov_len = count;
+	iov_iter_init(&iter, READ, &iov, 1, count);
+	return get_random_bytes_user(&iter);
 }
 
 static __poll_t random_poll(struct file *file, poll_table *wait)
@@ -1540,8 +1543,7 @@ static ssize_t random_write(struct file *file, const char __user *buffer,
 	return (ssize_t)count;
 }
 
-static ssize_t urandom_read(struct file *file, char __user *buf, size_t nbytes,
-			    loff_t *ppos)
+static ssize_t urandom_read_iter(struct kiocb *kiocb, struct iov_iter *to)
 {
 	static int maxwarn = 10;
 
@@ -1556,21 +1558,20 @@ static ssize_t urandom_read(struct file *file, char __user *buf, size_t nbytes,
 		maxwarn--;
 		if (__ratelimit(&urandom_warning))
 			pr_notice("%s: uninitialized urandom read (%zd bytes read)\n",
-				  current->comm, nbytes);
+				  current->comm, iov_iter_count(to));
 	}
 
-	return get_random_bytes_user(buf, nbytes);
+	return get_random_bytes_user(to);
 }
 
-static ssize_t random_read(struct file *file, char __user *buf, size_t nbytes,
-			   loff_t *ppos)
+static ssize_t random_read_iter(struct kiocb *kiocb, struct iov_iter *to)
 {
 	int ret;
 
 	ret = wait_for_random_bytes();
 	if (ret != 0)
 		return ret;
-	return get_random_bytes_user(buf, nbytes);
+	return get_random_bytes_user(to);
 }
 
 static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
@@ -1639,7 +1640,7 @@ static int random_fasync(int fd, struct file *filp, int on)
 }
 
 const struct file_operations random_fops = {
-	.read = random_read,
+	.read_iter = random_read_iter,
 	.write = random_write,
 	.poll = random_poll,
 	.unlocked_ioctl = random_ioctl,
@@ -1649,7 +1650,7 @@ const struct file_operations random_fops = {
 };
 
 const struct file_operations urandom_fops = {
-	.read = urandom_read,
+	.read_iter = urandom_read_iter,
 	.write = random_write,
 	.unlocked_ioctl = random_ioctl,
 	.compat_ioctl = compat_ptr_ioctl,
-- 
2.35.1


^ permalink raw reply related	[flat|nested] 14+ messages in thread

* Re: [PATCH 1/2] random: convert to using fops->read_iter()
  2022-05-19 19:31 ` [PATCH 1/2] random: convert to using fops->read_iter() Jens Axboe
@ 2022-05-19 23:12   ` Jason A. Donenfeld
  2022-05-19 23:20     ` Jason A. Donenfeld
  2022-05-19 23:21     ` Jens Axboe
  0 siblings, 2 replies; 14+ messages in thread
From: Jason A. Donenfeld @ 2022-05-19 23:12 UTC (permalink / raw)
  To: Jens Axboe; +Cc: tytso, hch, linux-kernel

Hi Jens,

On Thu, May 19, 2022 at 01:31:32PM -0600, Jens Axboe wrote:
>  	for (;;) {
>  		chacha20_block(chacha_state, output);
>  		if (unlikely(chacha_state[12] == 0))
>  			++chacha_state[13];
>  
>  		block_len = min_t(size_t, len, CHACHA_BLOCK_SIZE);
> -		left = copy_to_user(ubuf, output, block_len);
> -		if (left) {
> -			ret += block_len - left;
> +		block_len = copy_to_iter(output, block_len, to);
> +		if (!block_len)
>  			break;
> -		}
>  
> -		ubuf += block_len;
>  		ret += block_len;
>  		len -= block_len;
> -		if (!len)
> -			break;
>  
>  		BUILD_BUG_ON(PAGE_SIZE % CHACHA_BLOCK_SIZE != 0);
>  		if (ret % PAGE_SIZE == 0) {
>  			if (signal_pending(current))
>  				break;
>  			cond_resched();
>  		}
>  	}

This isn't quite the same, is it? Before, it would immediately break out
of the loop on any short copy. Now, it will only break out on a zero
copy, which means it's possible that ret % PAGE_SIZE == 0, and there'll
be an unnecessary cond_resched() before copy_to_iter() runs again and
then breaks.

Jason

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [PATCH 1/2] random: convert to using fops->read_iter()
  2022-05-19 23:12   ` Jason A. Donenfeld
@ 2022-05-19 23:20     ` Jason A. Donenfeld
  2022-05-19 23:21       ` Jens Axboe
  2022-05-19 23:21     ` Jens Axboe
  1 sibling, 1 reply; 14+ messages in thread
From: Jason A. Donenfeld @ 2022-05-19 23:20 UTC (permalink / raw)
  To: Jens Axboe; +Cc: tytso, hch, linux-kernel

On Fri, May 20, 2022 at 01:12:04AM +0200, Jason A. Donenfeld wrote:
> Hi Jens,
> 
> On Thu, May 19, 2022 at 01:31:32PM -0600, Jens Axboe wrote:
> >  	for (;;) {
> >  		chacha20_block(chacha_state, output);
> >  		if (unlikely(chacha_state[12] == 0))
> >  			++chacha_state[13];
> >  
> >  		block_len = min_t(size_t, len, CHACHA_BLOCK_SIZE);
> > -		left = copy_to_user(ubuf, output, block_len);
> > -		if (left) {
> > -			ret += block_len - left;
> > +		block_len = copy_to_iter(output, block_len, to);
> > +		if (!block_len)
> >  			break;
> > -		}
> >  
> > -		ubuf += block_len;
> >  		ret += block_len;
> >  		len -= block_len;
> > -		if (!len)
> > -			break;
> >  
> >  		BUILD_BUG_ON(PAGE_SIZE % CHACHA_BLOCK_SIZE != 0);
> >  		if (ret % PAGE_SIZE == 0) {
> >  			if (signal_pending(current))
> >  				break;
> >  			cond_resched();
> >  		}
> >  	}
> 
> This isn't quite the same, is it? Before, it would immediately break out
> of the loop on any short copy. Now, it will only break out on a zero
> copy, which means it's possible that ret % PAGE_SIZE == 0, and there'll
> be an unnecessary cond_resched() before copy_to_iter() runs again and
> then breaks.

Maybe something like the below would do the trick?


static ssize_t get_random_bytes_user(struct iov_iter *to)
{
	size_t block_len, copied, ret = 0, len = iov_iter_count(to);
	u32 chacha_state[CHACHA_STATE_WORDS];
	u8 output[CHACHA_BLOCK_SIZE];

	if (!len)
		return 0;

	/*
	 * Immediately overwrite the ChaCha key at index 4 with random
	 * bytes, in case userspace causes copy_to_user() below to sleep
	 * forever, so that we still retain forward secrecy in that case.
	 */
	crng_make_state(chacha_state, (u8 *)&chacha_state[4], CHACHA_KEY_SIZE);
	/*
	 * However, if we're doing a read of len <= 32, we don't need to
	 * use chacha_state after, so we can simply return those bytes to
	 * the user directly.
	 */
	if (len <= CHACHA_KEY_SIZE) {
		ret = copy_to_iter(&chacha_state[4], len, to);
		goto out_zero_chacha;
	}

	for (;;) {
		chacha20_block(chacha_state, output);
		if (unlikely(chacha_state[12] == 0))
			++chacha_state[13];

		block_len = min_t(size_t, len, CHACHA_BLOCK_SIZE);
		copied = copy_to_iter(output, block_len, to);
		ret += copied;
		if (block_len != copied)
			break;
		len -= copied;

		BUILD_BUG_ON(PAGE_SIZE % CHACHA_BLOCK_SIZE != 0);
		if (ret % PAGE_SIZE == 0) {
			if (signal_pending(current))
				break;
			cond_resched();
		}
	}

	memzero_explicit(output, sizeof(output));
out_zero_chacha:
	memzero_explicit(chacha_state, sizeof(chacha_state));
	return ret ? ret : -EFAULT;
}

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [PATCH 1/2] random: convert to using fops->read_iter()
  2022-05-19 23:12   ` Jason A. Donenfeld
  2022-05-19 23:20     ` Jason A. Donenfeld
@ 2022-05-19 23:21     ` Jens Axboe
  1 sibling, 0 replies; 14+ messages in thread
From: Jens Axboe @ 2022-05-19 23:21 UTC (permalink / raw)
  To: Jason A. Donenfeld; +Cc: tytso, hch, linux-kernel

On 5/19/22 5:12 PM, Jason A. Donenfeld wrote:
> Hi Jens,
> 
> On Thu, May 19, 2022 at 01:31:32PM -0600, Jens Axboe wrote:
>>  	for (;;) {
>>  		chacha20_block(chacha_state, output);
>>  		if (unlikely(chacha_state[12] == 0))
>>  			++chacha_state[13];
>>  
>>  		block_len = min_t(size_t, len, CHACHA_BLOCK_SIZE);
>> -		left = copy_to_user(ubuf, output, block_len);
>> -		if (left) {
>> -			ret += block_len - left;
>> +		block_len = copy_to_iter(output, block_len, to);
>> +		if (!block_len)
>>  			break;
>> -		}
>>  
>> -		ubuf += block_len;
>>  		ret += block_len;
>>  		len -= block_len;
>> -		if (!len)
>> -			break;
>>  
>>  		BUILD_BUG_ON(PAGE_SIZE % CHACHA_BLOCK_SIZE != 0);
>>  		if (ret % PAGE_SIZE == 0) {
>>  			if (signal_pending(current))
>>  				break;
>>  			cond_resched();
>>  		}
>>  	}
> 
> This isn't quite the same, is it? Before, it would immediately break
> out of the loop on any short copy. Now, it will only break out on a
> zero copy, which means it's possible that ret % PAGE_SIZE == 0, and
> there'll be an unnecessary cond_resched() before copy_to_iter() runs
> again and then breaks.

True, we could just make that:

copied = copy_to_iter(output, block_len, to);
if (copied != block_len)
	...

if that's important. Doesn't seem like it would, if you're passing in
invalid memory ranges. Maybe that ret check makes it so that it is
indeed important. I'll make the changes and send out a v2.

-- 
Jens Axboe


^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [PATCH 1/2] random: convert to using fops->read_iter()
  2022-05-19 23:20     ` Jason A. Donenfeld
@ 2022-05-19 23:21       ` Jens Axboe
  2022-05-19 23:21         ` Jason A. Donenfeld
  0 siblings, 1 reply; 14+ messages in thread
From: Jens Axboe @ 2022-05-19 23:21 UTC (permalink / raw)
  To: Jason A. Donenfeld; +Cc: tytso, hch, linux-kernel

On 5/19/22 5:20 PM, Jason A. Donenfeld wrote:
> On Fri, May 20, 2022 at 01:12:04AM +0200, Jason A. Donenfeld wrote:
>> Hi Jens,
>>
>> On Thu, May 19, 2022 at 01:31:32PM -0600, Jens Axboe wrote:
>>>  	for (;;) {
>>>  		chacha20_block(chacha_state, output);
>>>  		if (unlikely(chacha_state[12] == 0))
>>>  			++chacha_state[13];
>>>  
>>>  		block_len = min_t(size_t, len, CHACHA_BLOCK_SIZE);
>>> -		left = copy_to_user(ubuf, output, block_len);
>>> -		if (left) {
>>> -			ret += block_len - left;
>>> +		block_len = copy_to_iter(output, block_len, to);
>>> +		if (!block_len)
>>>  			break;
>>> -		}
>>>  
>>> -		ubuf += block_len;
>>>  		ret += block_len;
>>>  		len -= block_len;
>>> -		if (!len)
>>> -			break;
>>>  
>>>  		BUILD_BUG_ON(PAGE_SIZE % CHACHA_BLOCK_SIZE != 0);
>>>  		if (ret % PAGE_SIZE == 0) {
>>>  			if (signal_pending(current))
>>>  				break;
>>>  			cond_resched();
>>>  		}
>>>  	}
>>
>> This isn't quite the same, is it? Before, it would immediately break out
>> of the loop on any short copy. Now, it will only break out on a zero
>> copy, which means it's possible that ret % PAGE_SIZE == 0, and there'll
>> be an unnecessary cond_resched() before copy_to_iter() runs again and
>> then breaks.
> 
> Maybe something like the below would do the trick?
> 
> 
> static ssize_t get_random_bytes_user(struct iov_iter *to)
> {
> 	size_t block_len, copied, ret = 0, len = iov_iter_count(to);
> 	u32 chacha_state[CHACHA_STATE_WORDS];
> 	u8 output[CHACHA_BLOCK_SIZE];
> 
> 	if (!len)
> 		return 0;
> 
> 	/*
> 	 * Immediately overwrite the ChaCha key at index 4 with random
> 	 * bytes, in case userspace causes copy_to_user() below to sleep
> 	 * forever, so that we still retain forward secrecy in that case.
> 	 */
> 	crng_make_state(chacha_state, (u8 *)&chacha_state[4], CHACHA_KEY_SIZE);
> 	/*
> 	 * However, if we're doing a read of len <= 32, we don't need to
> 	 * use chacha_state after, so we can simply return those bytes to
> 	 * the user directly.
> 	 */
> 	if (len <= CHACHA_KEY_SIZE) {
> 		ret = copy_to_iter(&chacha_state[4], len, to);
> 		goto out_zero_chacha;
> 	}
> 
> 	for (;;) {
> 		chacha20_block(chacha_state, output);
> 		if (unlikely(chacha_state[12] == 0))
> 			++chacha_state[13];
> 
> 		block_len = min_t(size_t, len, CHACHA_BLOCK_SIZE);
> 		copied = copy_to_iter(output, block_len, to);
> 		ret += copied;
> 		if (block_len != copied)
> 			break;
> 		len -= copied;

Yep, that looks good! Do you still want a v2?

-- 
Jens Axboe


^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [PATCH 1/2] random: convert to using fops->read_iter()
  2022-05-19 23:21       ` Jens Axboe
@ 2022-05-19 23:21         ` Jason A. Donenfeld
  0 siblings, 0 replies; 14+ messages in thread
From: Jason A. Donenfeld @ 2022-05-19 23:21 UTC (permalink / raw)
  To: Jens Axboe; +Cc: Theodore Ts'o, Christoph Hellwig, LKML

On Fri, May 20, 2022 at 1:21 AM Jens Axboe <axboe@kernel.dk> wrote:
>
> On 5/19/22 5:20 PM, Jason A. Donenfeld wrote:
> > On Fri, May 20, 2022 at 01:12:04AM +0200, Jason A. Donenfeld wrote:
> >> Hi Jens,
> >>
> >> On Thu, May 19, 2022 at 01:31:32PM -0600, Jens Axboe wrote:
> >>>     for (;;) {
> >>>             chacha20_block(chacha_state, output);
> >>>             if (unlikely(chacha_state[12] == 0))
> >>>                     ++chacha_state[13];
> >>>
> >>>             block_len = min_t(size_t, len, CHACHA_BLOCK_SIZE);
> >>> -           left = copy_to_user(ubuf, output, block_len);
> >>> -           if (left) {
> >>> -                   ret += block_len - left;
> >>> +           block_len = copy_to_iter(output, block_len, to);
> >>> +           if (!block_len)
> >>>                     break;
> >>> -           }
> >>>
> >>> -           ubuf += block_len;
> >>>             ret += block_len;
> >>>             len -= block_len;
> >>> -           if (!len)
> >>> -                   break;
> >>>
> >>>             BUILD_BUG_ON(PAGE_SIZE % CHACHA_BLOCK_SIZE != 0);
> >>>             if (ret % PAGE_SIZE == 0) {
> >>>                     if (signal_pending(current))
> >>>                             break;
> >>>                     cond_resched();
> >>>             }
> >>>     }
> >>
> >> This isn't quite the same, is it? Before, it would immediately break out
> >> of the loop on any short copy. Now, it will only break out on a zero
> >> copy, which means it's possible that ret % PAGE_SIZE == 0, and there'll
> >> be an unnecessary cond_resched() before copy_to_iter() runs again and
> >> then breaks.
> >
> > Maybe something like the below would do the trick?
> >
> >
> > static ssize_t get_random_bytes_user(struct iov_iter *to)
> > {
> >       size_t block_len, copied, ret = 0, len = iov_iter_count(to);
> >       u32 chacha_state[CHACHA_STATE_WORDS];
> >       u8 output[CHACHA_BLOCK_SIZE];
> >
> >       if (!len)
> >               return 0;
> >
> >       /*
> >        * Immediately overwrite the ChaCha key at index 4 with random
> >        * bytes, in case userspace causes copy_to_user() below to sleep
> >        * forever, so that we still retain forward secrecy in that case.
> >        */
> >       crng_make_state(chacha_state, (u8 *)&chacha_state[4], CHACHA_KEY_SIZE);
> >       /*
> >        * However, if we're doing a read of len <= 32, we don't need to
> >        * use chacha_state after, so we can simply return those bytes to
> >        * the user directly.
> >        */
> >       if (len <= CHACHA_KEY_SIZE) {
> >               ret = copy_to_iter(&chacha_state[4], len, to);
> >               goto out_zero_chacha;
> >       }
> >
> >       for (;;) {
> >               chacha20_block(chacha_state, output);
> >               if (unlikely(chacha_state[12] == 0))
> >                       ++chacha_state[13];
> >
> >               block_len = min_t(size_t, len, CHACHA_BLOCK_SIZE);
> >               copied = copy_to_iter(output, block_len, to);
> >               ret += copied;
> >               if (block_len != copied)
> >                       break;
> >               len -= copied;
>
> Yep, that looks good! Do you still want a v2?

Yes please, thanks.

Jason

^ permalink raw reply	[flat|nested] 14+ messages in thread

* [PATCHSET v2 0/2] Fix splice from random/urandom
@ 2022-05-19 23:31 Jens Axboe
  2022-05-19 23:31 ` [PATCH 1/2] random: convert to using fops->read_iter() Jens Axboe
                   ` (2 more replies)
  0 siblings, 3 replies; 14+ messages in thread
From: Jens Axboe @ 2022-05-19 23:31 UTC (permalink / raw)
  To: tytso, Jason; +Cc: hch, linux-kernel

Hi,

We recently had a failure on a kernel upgrade because splice no longer
works on random/urandom. This is due to:

36e2c7421f02 ("fs: don't allow splice read/write without explicit ops")

which already has more than two handful of Fixes registered to its
name...

Wire up read_iter handling and then hook up splice_read for both of
them as well.

v2: rebase on random git tree

-- 
Jens Axboe



^ permalink raw reply	[flat|nested] 14+ messages in thread

* [PATCH 1/2] random: convert to using fops->read_iter()
  2022-05-19 23:31 [PATCHSET v2 0/2] Fix splice from random/urandom Jens Axboe
@ 2022-05-19 23:31 ` Jens Axboe
  2022-05-20  3:11   ` Al Viro
  2022-05-19 23:31 ` [PATCH 2/2] random: wire up fops->splice_read_iter() Jens Axboe
  2022-05-19 23:56 ` [PATCHSET v2 0/2] Fix splice from random/urandom Jason A. Donenfeld
  2 siblings, 1 reply; 14+ messages in thread
From: Jens Axboe @ 2022-05-19 23:31 UTC (permalink / raw)
  To: tytso, Jason; +Cc: hch, linux-kernel, Jens Axboe

This is a pre-requisite to wiring up splice() again for the random
and urandom drivers.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 drivers/char/random.c | 44 ++++++++++++++++++++++---------------------
 1 file changed, 23 insertions(+), 21 deletions(-)

diff --git a/drivers/char/random.c b/drivers/char/random.c
index 0958fa91a964..d000fe6fbb5a 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -397,11 +397,12 @@ void get_random_bytes(void *buf, size_t len)
 }
 EXPORT_SYMBOL(get_random_bytes);
 
-static ssize_t get_random_bytes_user(void __user *ubuf, size_t len)
+static ssize_t get_random_bytes_user(struct iov_iter *to)
 {
-	size_t block_len, left, ret = 0;
+	size_t block_len, ret = 0;
 	u32 chacha_state[CHACHA_STATE_WORDS];
 	u8 output[CHACHA_BLOCK_SIZE];
+	size_t len = iov_iter_count(to);
 
 	if (!len)
 		return 0;
@@ -418,25 +419,23 @@ static ssize_t get_random_bytes_user(void __user *ubuf, size_t len)
 	 * the user directly.
 	 */
 	if (len <= CHACHA_KEY_SIZE) {
-		ret = len - copy_to_user(ubuf, &chacha_state[4], len);
+		ret = copy_to_iter(&chacha_state[4], len, to);
 		goto out_zero_chacha;
 	}
 
 	for (;;) {
+		size_t copied;
+
 		chacha20_block(chacha_state, output);
 		if (unlikely(chacha_state[12] == 0))
 			++chacha_state[13];
 
 		block_len = min_t(size_t, len, CHACHA_BLOCK_SIZE);
-		left = copy_to_user(ubuf, output, block_len);
-		if (left) {
-			ret += block_len - left;
+		copied = copy_to_iter(output, block_len, to);
+		ret += copied;
+		if (copied != block_len)
 			break;
-		}
-
-		ubuf += block_len;
-		ret += block_len;
-		len -= block_len;
+		len -= copied;
 		if (!len)
 			break;
 
@@ -1248,6 +1247,9 @@ static void __cold try_to_generate_entropy(void)
 
 SYSCALL_DEFINE3(getrandom, char __user *, ubuf, size_t, len, unsigned int, flags)
 {
+	struct iovec iov = { .iov_base = ubuf };
+	struct iov_iter iter;
+
 	if (flags & ~(GRND_NONBLOCK | GRND_RANDOM | GRND_INSECURE))
 		return -EINVAL;
 
@@ -1270,7 +1272,9 @@ SYSCALL_DEFINE3(getrandom, char __user *, ubuf, size_t, len, unsigned int, flags
 		if (unlikely(ret))
 			return ret;
 	}
-	return get_random_bytes_user(ubuf, len);
+	iov.iov_len = len;
+	iov_iter_init(&iter, READ, &iov, 1, len);
+	return get_random_bytes_user(&iter);
 }
 
 static __poll_t random_poll(struct file *file, poll_table *wait)
@@ -1314,8 +1318,7 @@ static ssize_t random_write(struct file *file, const char __user *ubuf,
 	return (ssize_t)len;
 }
 
-static ssize_t urandom_read(struct file *file, char __user *ubuf,
-			    size_t len, loff_t *ppos)
+static ssize_t urandom_read_iter(struct kiocb *kiocb, struct iov_iter *to)
 {
 	static int maxwarn = 10;
 
@@ -1332,22 +1335,21 @@ static ssize_t urandom_read(struct file *file, char __user *ubuf,
 		else if (ratelimit_disable || __ratelimit(&urandom_warning)) {
 			--maxwarn;
 			pr_notice("%s: uninitialized urandom read (%zd bytes read)\n",
-				  current->comm, len);
+				  current->comm, iov_iter_count(to));
 		}
 	}
 
-	return get_random_bytes_user(ubuf, len);
+	return get_random_bytes_user(to);
 }
 
-static ssize_t random_read(struct file *file, char __user *ubuf,
-			   size_t len, loff_t *ppos)
+static ssize_t random_read_iter(struct kiocb *kiocb, struct iov_iter *to)
 {
 	int ret;
 
 	ret = wait_for_random_bytes();
 	if (ret != 0)
 		return ret;
-	return get_random_bytes_user(ubuf, len);
+	return get_random_bytes_user(to);
 }
 
 static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
@@ -1409,7 +1411,7 @@ static int random_fasync(int fd, struct file *filp, int on)
 }
 
 const struct file_operations random_fops = {
-	.read = random_read,
+	.read_iter = random_read_iter,
 	.write = random_write,
 	.poll = random_poll,
 	.unlocked_ioctl = random_ioctl,
@@ -1419,7 +1421,7 @@ const struct file_operations random_fops = {
 };
 
 const struct file_operations urandom_fops = {
-	.read = urandom_read,
+	.read_iter = urandom_read_iter,
 	.write = random_write,
 	.unlocked_ioctl = random_ioctl,
 	.compat_ioctl = compat_ptr_ioctl,
-- 
2.35.1


^ permalink raw reply related	[flat|nested] 14+ messages in thread

* [PATCH 2/2] random: wire up fops->splice_read_iter()
  2022-05-19 23:31 [PATCHSET v2 0/2] Fix splice from random/urandom Jens Axboe
  2022-05-19 23:31 ` [PATCH 1/2] random: convert to using fops->read_iter() Jens Axboe
@ 2022-05-19 23:31 ` Jens Axboe
  2022-05-19 23:56 ` [PATCHSET v2 0/2] Fix splice from random/urandom Jason A. Donenfeld
  2 siblings, 0 replies; 14+ messages in thread
From: Jens Axboe @ 2022-05-19 23:31 UTC (permalink / raw)
  To: tytso, Jason; +Cc: hch, linux-kernel, Jens Axboe

Now that random/urandom is using read_iter, we can wire it up to using
the generic splice read handler.

Fixes: 36e2c7421f02 ("fs: don't allow splice read/write without explicit ops")
Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 drivers/char/random.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/drivers/char/random.c b/drivers/char/random.c
index d000fe6fbb5a..41ca5966aa4f 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1418,6 +1418,7 @@ const struct file_operations random_fops = {
 	.compat_ioctl = compat_ptr_ioctl,
 	.fasync = random_fasync,
 	.llseek = noop_llseek,
+	.splice_read = generic_file_splice_read,
 };
 
 const struct file_operations urandom_fops = {
@@ -1427,6 +1428,7 @@ const struct file_operations urandom_fops = {
 	.compat_ioctl = compat_ptr_ioctl,
 	.fasync = random_fasync,
 	.llseek = noop_llseek,
+	.splice_read = generic_file_splice_read,
 };
 
 
-- 
2.35.1


^ permalink raw reply related	[flat|nested] 14+ messages in thread

* Re: [PATCHSET v2 0/2] Fix splice from random/urandom
  2022-05-19 23:31 [PATCHSET v2 0/2] Fix splice from random/urandom Jens Axboe
  2022-05-19 23:31 ` [PATCH 1/2] random: convert to using fops->read_iter() Jens Axboe
  2022-05-19 23:31 ` [PATCH 2/2] random: wire up fops->splice_read_iter() Jens Axboe
@ 2022-05-19 23:56 ` Jason A. Donenfeld
  2022-05-20  0:00   ` Jens Axboe
  2 siblings, 1 reply; 14+ messages in thread
From: Jason A. Donenfeld @ 2022-05-19 23:56 UTC (permalink / raw)
  To: Jens Axboe; +Cc: Theodore Ts'o, Christoph Hellwig, LKML

Hi Jens,

This patchset seems to work. I'll queue it up with some cosmetic
changes. Thanks a lot.

Jason

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [PATCHSET v2 0/2] Fix splice from random/urandom
  2022-05-19 23:56 ` [PATCHSET v2 0/2] Fix splice from random/urandom Jason A. Donenfeld
@ 2022-05-20  0:00   ` Jens Axboe
  0 siblings, 0 replies; 14+ messages in thread
From: Jens Axboe @ 2022-05-20  0:00 UTC (permalink / raw)
  To: Jason A. Donenfeld; +Cc: Theodore Ts'o, Christoph Hellwig, LKML

On 5/19/22 5:56 PM, Jason A. Donenfeld wrote:
> Hi Jens,
> 
> This patchset seems to work. I'll queue it up with some cosmetic
> changes. Thanks a lot.

Great, thanks for the quick response!

-- 
Jens Axboe


^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [PATCH 1/2] random: convert to using fops->read_iter()
  2022-05-19 23:31 ` [PATCH 1/2] random: convert to using fops->read_iter() Jens Axboe
@ 2022-05-20  3:11   ` Al Viro
  2022-05-20  3:26     ` Jens Axboe
  2022-05-20  9:14     ` Jason A. Donenfeld
  0 siblings, 2 replies; 14+ messages in thread
From: Al Viro @ 2022-05-20  3:11 UTC (permalink / raw)
  To: Jens Axboe; +Cc: tytso, Jason, hch, linux-kernel

On Thu, May 19, 2022 at 05:31:36PM -0600, Jens Axboe wrote:

> @@ -418,25 +419,23 @@ static ssize_t get_random_bytes_user(void __user *ubuf, size_t len)
>  	 * the user directly.
>  	 */
>  	if (len <= CHACHA_KEY_SIZE) {
> -		ret = len - copy_to_user(ubuf, &chacha_state[4], len);
> +		ret = copy_to_iter(&chacha_state[4], len, to);
>  		goto out_zero_chacha;
>  	}
>  
>  	for (;;) {
> +		size_t copied;
> +
>  		chacha20_block(chacha_state, output);
>  		if (unlikely(chacha_state[12] == 0))
>  			++chacha_state[13];
>  
>  		block_len = min_t(size_t, len, CHACHA_BLOCK_SIZE);
> -		left = copy_to_user(ubuf, output, block_len);
> -		if (left) {
> -			ret += block_len - left;
> +		copied = copy_to_iter(output, block_len, to);
> +		ret += copied;
> +		if (copied != block_len)
>  			break;
> -		}

		copied = copy_to_iter(output, CHACHA_BLOCK_SIZE, to);
		ret += copied;
		if (copied != CHACHA_BLOCK_SIZE) {
			if (!ret)
				ret = -EFAULT;
			break;
		}
	}

>  SYSCALL_DEFINE3(getrandom, char __user *, ubuf, size_t, len, unsigned int, flags)
>  {
> +	struct iovec iov = { .iov_base = ubuf };
> +	struct iov_iter iter;

	import_single_range(READ, ubuf, len, &iov, &iter)

(note, BTW, that this'll cap len)

^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [PATCH 1/2] random: convert to using fops->read_iter()
  2022-05-20  3:11   ` Al Viro
@ 2022-05-20  3:26     ` Jens Axboe
  2022-05-20  9:14     ` Jason A. Donenfeld
  1 sibling, 0 replies; 14+ messages in thread
From: Jens Axboe @ 2022-05-20  3:26 UTC (permalink / raw)
  To: Al Viro; +Cc: tytso, Jason, hch, linux-kernel

On 5/19/22 9:11 PM, Al Viro wrote:
> On Thu, May 19, 2022 at 05:31:36PM -0600, Jens Axboe wrote:
> 
>> @@ -418,25 +419,23 @@ static ssize_t get_random_bytes_user(void __user *ubuf, size_t len)
>>  	 * the user directly.
>>  	 */
>>  	if (len <= CHACHA_KEY_SIZE) {
>> -		ret = len - copy_to_user(ubuf, &chacha_state[4], len);
>> +		ret = copy_to_iter(&chacha_state[4], len, to);
>>  		goto out_zero_chacha;
>>  	}
>>  
>>  	for (;;) {
>> +		size_t copied;
>> +
>>  		chacha20_block(chacha_state, output);
>>  		if (unlikely(chacha_state[12] == 0))
>>  			++chacha_state[13];
>>  
>>  		block_len = min_t(size_t, len, CHACHA_BLOCK_SIZE);
>> -		left = copy_to_user(ubuf, output, block_len);
>> -		if (left) {
>> -			ret += block_len - left;
>> +		copied = copy_to_iter(output, block_len, to);
>> +		ret += copied;
>> +		if (copied != block_len)
>>  			break;
>> -		}
> 
> 		copied = copy_to_iter(output, CHACHA_BLOCK_SIZE, to);
> 		ret += copied;
> 		if (copied != CHACHA_BLOCK_SIZE) {
> 			if (!ret)
> 				ret = -EFAULT;
> 			break;
> 		}
> 	}
> 
>>  SYSCALL_DEFINE3(getrandom, char __user *, ubuf, size_t, len, unsigned int, flags)
>>  {
>> +	struct iovec iov = { .iov_base = ubuf };
>> +	struct iov_iter iter;
> 
> 	import_single_range(READ, ubuf, len, &iov, &iter)
> 
> (note, BTW, that this'll cap len)

Yep both of these are good changes.

-- 
Jens Axboe


^ permalink raw reply	[flat|nested] 14+ messages in thread

* Re: [PATCH 1/2] random: convert to using fops->read_iter()
  2022-05-20  3:11   ` Al Viro
  2022-05-20  3:26     ` Jens Axboe
@ 2022-05-20  9:14     ` Jason A. Donenfeld
  1 sibling, 0 replies; 14+ messages in thread
From: Jason A. Donenfeld @ 2022-05-20  9:14 UTC (permalink / raw)
  To: Al Viro; +Cc: Jens Axboe, tytso, hch, linux-kernel

Hi Al,

On Fri, May 20, 2022 at 03:11:06AM +0000, Al Viro wrote:
> >  SYSCALL_DEFINE3(getrandom, char __user *, ubuf, size_t, len,
> >  unsigned int, flags) { +	struct iovec iov = { .iov_base = ubuf };
> >  +	struct iov_iter iter;
> 
> 	import_single_range(READ, ubuf, len, &iov, &iter)
> 
> (note, BTW, that this'll cap len)

I'll incorporate this and send a v4. import_single_range does an
access_ok(), but I would hope that copy_to_iter() also does similar
checks. Does that make this less efficient?

Jason

^ permalink raw reply	[flat|nested] 14+ messages in thread

end of thread, other threads:[~2022-05-20  9:15 UTC | newest]

Thread overview: 14+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2022-05-19 23:31 [PATCHSET v2 0/2] Fix splice from random/urandom Jens Axboe
2022-05-19 23:31 ` [PATCH 1/2] random: convert to using fops->read_iter() Jens Axboe
2022-05-20  3:11   ` Al Viro
2022-05-20  3:26     ` Jens Axboe
2022-05-20  9:14     ` Jason A. Donenfeld
2022-05-19 23:31 ` [PATCH 2/2] random: wire up fops->splice_read_iter() Jens Axboe
2022-05-19 23:56 ` [PATCHSET v2 0/2] Fix splice from random/urandom Jason A. Donenfeld
2022-05-20  0:00   ` Jens Axboe
  -- strict thread matches above, loose matches on Subject: below --
2022-05-19 19:31 [PATCHSET " Jens Axboe
2022-05-19 19:31 ` [PATCH 1/2] random: convert to using fops->read_iter() Jens Axboe
2022-05-19 23:12   ` Jason A. Donenfeld
2022-05-19 23:20     ` Jason A. Donenfeld
2022-05-19 23:21       ` Jens Axboe
2022-05-19 23:21         ` Jason A. Donenfeld
2022-05-19 23:21     ` Jens Axboe

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox