From: Roberto Bergantinos Corpas <rbergant@redhat.com>
To: "J. Bruce Fields" <bfields@fieldses.org>
Cc: linux-nfs@vger.kernel.org
Subject: Re: [PATCH] sunrpc : make RPC channel buffer dynamic for slow case
Date: Sat, 21 Nov 2020 11:54:30 +0100	[thread overview]
Message-ID: <CACWnjLxiCTAkxBca_NFrUSPCq_g4y0yNaHuNKX+Rwr=-xPhibw@mail.gmail.com> (raw)
In-Reply-To: <20201106215128.GD26028@fieldses.org>

Hi Bruce,

  Sorry for the late response as well.

    Ok, here's a possible patch; let me know your thoughts.

diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index baef5ee43dbb..1347ecae9c84 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -777,7 +777,6 @@ void cache_clean_deferred(void *owner)
  */

 static DEFINE_SPINLOCK(queue_lock);
-static DEFINE_MUTEX(queue_io_mutex);

 struct cache_queue {
        struct list_head        list;
@@ -905,44 +904,26 @@ static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
        return ret;
 }

-static ssize_t cache_slow_downcall(const char __user *buf,
-                                  size_t count, struct cache_detail *cd)
-{
-       static char write_buf[8192]; /* protected by queue_io_mutex */
-       ssize_t ret = -EINVAL;
-
-       if (count >= sizeof(write_buf))
-               goto out;
-       mutex_lock(&queue_io_mutex);
-       ret = cache_do_downcall(write_buf, buf, count, cd);
-       mutex_unlock(&queue_io_mutex);
-out:
-       return ret;
-}
-
 static ssize_t cache_downcall(struct address_space *mapping,
                              const char __user *buf,
                              size_t count, struct cache_detail *cd)
 {
-       struct page *page;
-       char *kaddr;
+       char *write_buf;
        ssize_t ret = -ENOMEM;

-       if (count >= PAGE_SIZE)
-               goto out_slow;
+       if (count >= 32768) { /* 32k is max userland buffer, let's check anyway */
+               ret = -EINVAL;
+               goto out;
+       }

-       page = find_or_create_page(mapping, 0, GFP_KERNEL);
-       if (!page)
-               goto out_slow;
+       write_buf = kvmalloc(count + 1, GFP_KERNEL);
+       if (!write_buf)
+               goto out;

-       kaddr = kmap(page);
-       ret = cache_do_downcall(kaddr, buf, count, cd);
-       kunmap(page);
-       unlock_page(page);
-       put_page(page);
+       ret = cache_do_downcall(write_buf, buf, count, cd);
+       kvfree(write_buf);
+out:
        return ret;
-out_slow:
-       return cache_slow_downcall(buf, count, cd);
 }

 static ssize_t cache_write(struct file *filp, const char __user *buf,
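
FWIW, with the hunk above applied cache_downcall() should end up looking
roughly like this (reconstructed from the diff, not pasted from a tree):

static ssize_t cache_downcall(struct address_space *mapping,
                              const char __user *buf,
                              size_t count, struct cache_detail *cd)
{
        char *write_buf;
        ssize_t ret = -ENOMEM;

        /* 32k is the largest buffer userland should hand us */
        if (count >= 32768) {
                ret = -EINVAL;
                goto out;
        }

        write_buf = kvmalloc(count + 1, GFP_KERNEL);
        if (!write_buf)
                goto out;

        ret = cache_do_downcall(write_buf, buf, count, cd);
        kvfree(write_buf);
out:
        return ret;
}

Note that the mapping argument becomes unused once the page-cache path is
gone, so it could be dropped from the signature and from the caller in a
follow-up.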

On Fri, Nov 6, 2020 at 10:51 PM J. Bruce Fields <bfields@fieldses.org> wrote:
>
> On Mon, Oct 26, 2020 at 04:05:30PM +0100, Roberto Bergantinos Corpas wrote:
> > RPC channel buffer size for slow case (user buffer bigger than
> > one page) can be made dynamic, which also allows us to
> > dispense with queue_io_mutex
>
> Sorry for the slow response.
>
> Let's just remove cache_slow_downcall and the find_or_create_page()
> thing and just do a kvmalloc() from the start.  I don't understand why
> we need to be more complicated.
>
> --b.
>
> >
> > Signed-off-by: Roberto Bergantinos Corpas <rbergant@redhat.com>
> > ---
> >  net/sunrpc/cache.c | 13 ++++++++-----
> >  1 file changed, 8 insertions(+), 5 deletions(-)
> >
> > diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
> > index baef5ee43dbb..325393f75e17 100644
> > --- a/net/sunrpc/cache.c
> > +++ b/net/sunrpc/cache.c
> > @@ -777,7 +777,6 @@ void cache_clean_deferred(void *owner)
> >   */
> >
> >  static DEFINE_SPINLOCK(queue_lock);
> > -static DEFINE_MUTEX(queue_io_mutex);
> >
> >  struct cache_queue {
> >       struct list_head        list;
> > @@ -908,14 +907,18 @@ static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
> >  static ssize_t cache_slow_downcall(const char __user *buf,
> >                                  size_t count, struct cache_detail *cd)
> >  {
> > -     static char write_buf[8192]; /* protected by queue_io_mutex */
> > +     char *write_buf;
> >       ssize_t ret = -EINVAL;
> >
> > -     if (count >= sizeof(write_buf))
> > +     if (count >= 32768) /* 32k is max userland buffer, let's check anyway */
> >               goto out;
> > -     mutex_lock(&queue_io_mutex);
> > +
> > +     write_buf = kvmalloc(count + 1, GFP_KERNEL);
> > +     if (!write_buf)
> > +             return -ENOMEM;
> > +
> >       ret = cache_do_downcall(write_buf, buf, count, cd);
> > -     mutex_unlock(&queue_io_mutex);
> > +     kvfree(write_buf);
> >  out:
> >       return ret;
> >  }
> > --
> > 2.21.0
>
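
For context, the path being changed here is the one exercised when a
userspace cache daemon (rpc.mountd, for instance) answers an upcall by
writing a reply line into a channel file under /proc/net/rpc/. A minimal
sketch of such a downcall write, with a hypothetical reply payload and the
auth.unix.ip channel picked purely as an example, would be:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        /* Hypothetical reply line; real daemons build cache-specific
         * replies (here: class, address, expiry, domain). */
        const char reply[] = "nfsd 192.0.2.1 2147483647 examplehost\n";
        int fd = open("/proc/net/rpc/auth.unix.ip/channel", O_WRONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        /* One write() per reply; on the kernel side this lands in
         * cache_write() -> cache_downcall() in net/sunrpc/cache.c. */
        if (write(fd, reply, strlen(reply)) != (ssize_t)strlen(reply))
                perror("write");
        close(fd);
        return 0;
}

The point of the patch is simply that the kernel-side copy of such a reply
is now sized to the write itself rather than bouncing through a static 8k
buffer or a page-cache page.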



Thread overview: 6+ messages
2020-10-26 15:05 [PATCH] sunrpc : make RPC channel buffer dynamic for slow case Roberto Bergantinos Corpas
2020-11-06 21:51 ` J. Bruce Fields
2020-11-21 10:54   ` Roberto Bergantinos Corpas [this message]
2020-11-23 15:36     ` J. Bruce Fields
2020-11-23 15:48       ` Chuck Lever
2020-11-23 16:05         ` Bruce Fields
