From mboxrd@z Thu Jan 1 00:00:00 1970
From: john.hubbard@gmail.com
Date: Wed, 07 Aug 2019 01:33:30 +0000
Subject: [PATCH v3 31/41] mm/process_vm_access.c: convert put_page() to put_user_page*()
Message-Id: <20190807013340.9706-32-jhubbard@nvidia.com>
List-Id:
References: <20190807013340.9706-1-jhubbard@nvidia.com>
In-Reply-To: <20190807013340.9706-1-jhubbard@nvidia.com>
MIME-Version: 1.0
Content-Type: text/plain; charset="us-ascii"
Content-Transfer-Encoding: 7bit
To: Andrew Morton
Cc: linux-fbdev@vger.kernel.org, Jan Kara, kvm@vger.kernel.org,
 Dave Hansen, Dave Chinner, dri-devel@lists.freedesktop.org,
 linux-mm@kvack.org, sparclinux@vger.kernel.org, Ira Weiny,
 ceph-devel@vger.kernel.org, devel@driverdev.osuosl.org,
 rds-devel@oss.oracle.com, linux-rdma@vger.kernel.org, x86@kernel.org,
 amd-gfx@lists.freedesktop.org, Ingo Molnar, Christoph Hellwig,
 Jason Gunthorpe, Rashika Kheria, xen-devel@lists.xenproject.org,
 devel@lists.orangefs.org, linux-media@vger.kernel.org, Andrea Arcangeli,
 John Hubbard, intel-gfx@lists.freedesktop.org, linux-block@vger.kernel.org,
 Jérôme Glisse, Mike Rapoport

From: John Hubbard

For pages that were retained via get_user_pages*(), release those pages
via the new put_user_page*() routines, instead of via put_page() or
release_pages().

This is part of a tree-wide conversion, as described in commit
fc1d8e7cca2d ("mm: introduce put_user_page*(), placeholder versions").

Cc: Al Viro
Cc: Andrea Arcangeli
Cc: Christopher Yeoh
Cc: Dave Hansen
Cc: Heiko Carstens
Cc: Ingo Molnar
Cc: Jann Horn
Cc: Lorenzo Stoakes
Cc: Mathieu Desnoyers
Cc: Mike Rapoport
Cc: Rashika Kheria
Signed-off-by: John Hubbard
---
 mm/process_vm_access.c | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
index 357aa7bef6c0..4d29d54ec93f 100644
--- a/mm/process_vm_access.c
+++ b/mm/process_vm_access.c
@@ -96,7 +96,7 @@ static int process_vm_rw_single_vec(unsigned long addr,
 		flags |= FOLL_WRITE;
 
 	while (!rc && nr_pages && iov_iter_count(iter)) {
-		int pages = min(nr_pages, max_pages_per_loop);
+		int pinned_pages = min(nr_pages, max_pages_per_loop);
 		int locked = 1;
 		size_t bytes;
 
@@ -106,14 +106,15 @@ static int process_vm_rw_single_vec(unsigned long addr,
 		 * current/current->mm
 		 */
 		down_read(&mm->mmap_sem);
-		pages = get_user_pages_remote(task, mm, pa, pages, flags,
-					      process_pages, NULL, &locked);
+		pinned_pages = get_user_pages_remote(task, mm, pa, pinned_pages,
+						     flags, process_pages, NULL,
+						     &locked);
 		if (locked)
 			up_read(&mm->mmap_sem);
-		if (pages <= 0)
+		if (pinned_pages <= 0)
 			return -EFAULT;
 
-		bytes = pages * PAGE_SIZE - start_offset;
+		bytes = pinned_pages * PAGE_SIZE - start_offset;
 		if (bytes > len)
 			bytes = len;
 
@@ -122,10 +123,9 @@ static int process_vm_rw_single_vec(unsigned long addr,
 			 vm_write);
 		len -= bytes;
 		start_offset = 0;
-		nr_pages -= pages;
-		pa += pages * PAGE_SIZE;
-		while (pages)
-			put_page(process_pages[--pages]);
+		nr_pages -= pinned_pages;
+		pa += pinned_pages * PAGE_SIZE;
+		put_user_pages(process_pages, pinned_pages);
 	}
 
 	return rc;
-- 
2.22.0
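
A minimal, self-contained sketch of the conversion pattern, for readers
who have not followed the rest of the series. Everything below is
illustrative only: the function and variable names are hypothetical and
do not appear in process_vm_access.c; the only API assumed beyond plain
GUP is the put_user_pages() call introduced in commit fc1d8e7cca2d.

#include <linux/mm.h>

/*
 * Illustrative sketch only (not part of this patch): pin a user buffer,
 * use it, then release all pages with a single put_user_pages() call
 * instead of an open-coded put_page() loop.
 */
static int example_pin_and_release(unsigned long uaddr, int nr_pages,
				   struct page **pages)
{
	int pinned;

	/* Pin up to nr_pages pages of the user buffer, for writing. */
	pinned = get_user_pages_fast(uaddr, nr_pages, FOLL_WRITE, pages);
	if (pinned <= 0)
		return -EFAULT;

	/* ... read from / write to the pinned pages here ... */

	/*
	 * Before this series:
	 *	while (pinned)
	 *		put_page(pages[--pinned]);
	 * After this series:
	 */
	put_user_pages(pages, pinned);
	return 0;
}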