From: Eric Farman <farman@linux.ibm.com>
To: Matthew Rosato <mjrosato@linux.ibm.com>,
Halil Pasic <pasic@linux.ibm.com>
Cc: Heiko Carstens <hca@linux.ibm.com>,
Vasily Gorbik <gor@linux.ibm.com>,
Alexander Gordeev <agordeev@linux.ibm.com>,
Christian Borntraeger <borntraeger@linux.ibm.com>,
Sven Schnelle <svens@linux.ibm.com>,
Vineeth Vijayan <vneethv@linux.ibm.com>,
Peter Oberparleiter <oberpar@linux.ibm.com>,
linux-s390@vger.kernel.org, kvm@vger.kernel.org,
Eric Farman <farman@linux.ibm.com>
Subject: [PATCH v2 15/16] vfio/ccw: don't group contiguous pages on 2K IDAWs
Date: Tue, 20 Dec 2022 18:10:07 +0100 [thread overview]
Message-ID: <20221220171008.1362680-16-farman@linux.ibm.com> (raw)
In-Reply-To: <20221220171008.1362680-1-farman@linux.ibm.com>
The vfio_pin_pages() interface allows contiguous pages to be
pinned as a single request, which is great for the 4K pages
that are normally processed. Old IDA formats operate on 2K
chunks, which makes this logic more difficult.
Since these formats are rare, let's just invoke the page
pinning one-at-a-time, instead of trying to group them.
We can rework this code at a later date if needed.
Signed-off-by: Eric Farman <farman@linux.ibm.com>
Reviewed-by: Matthew Rosato <mjrosato@linux.ibm.com>
---
drivers/s390/cio/vfio_ccw_cp.c | 30 ++++++++++++++++++++----------
1 file changed, 20 insertions(+), 10 deletions(-)
diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
index 52e5abce5827..098d1e9f0c97 100644
--- a/drivers/s390/cio/vfio_ccw_cp.c
+++ b/drivers/s390/cio/vfio_ccw_cp.c
@@ -83,12 +83,13 @@ static int page_array_alloc(struct page_array *pa, unsigned int len)
* @pa: page_array on which to perform the operation
* @vdev: the vfio device to perform the operation
* @pa_nr: number of user pages to unpin
+ * @unaligned: were pages unaligned on the pin request
*
* Only unpin if any pages were pinned to begin with, i.e. pa_nr > 0,
* otherwise only clear pa->pa_nr
*/
static void page_array_unpin(struct page_array *pa,
- struct vfio_device *vdev, int pa_nr)
+ struct vfio_device *vdev, int pa_nr, bool unaligned)
{
int unpinned = 0, npage = 1;
@@ -97,7 +98,8 @@ static void page_array_unpin(struct page_array *pa,
dma_addr_t *last = &first[npage];
if (unpinned + npage < pa_nr &&
- *first + npage * PAGE_SIZE == *last) {
+ *first + npage * PAGE_SIZE == *last &&
+ !unaligned) {
npage++;
continue;
}
@@ -114,12 +116,19 @@ static void page_array_unpin(struct page_array *pa,
* page_array_pin() - Pin user pages in memory
* @pa: page_array on which to perform the operation
* @vdev: the vfio device to perform pin operations
+ * @unaligned: were pages unaligned on the pin request
*
* Returns number of pages pinned upon success.
* If the pin request partially succeeds, or fails completely,
* all pages are left unpinned and a negative error value is returned.
+ *
+ * Requests to pin "aligned" pages can be coalesced into a single
+ * vfio_pin_pages request for the sake of efficiency, based on the
+ * expectation of 4K page requests. Unaligned requests are probably
+ * dealing with 2K "pages", and cannot be coalesced without
+ * reworking this logic to incorporate that math.
*/
-static int page_array_pin(struct page_array *pa, struct vfio_device *vdev)
+static int page_array_pin(struct page_array *pa, struct vfio_device *vdev, bool unaligned)
{
int pinned = 0, npage = 1;
int ret = 0;
@@ -129,7 +138,8 @@ static int page_array_pin(struct page_array *pa, struct vfio_device *vdev)
dma_addr_t *last = &first[npage];
if (pinned + npage < pa->pa_nr &&
- *first + npage * PAGE_SIZE == *last) {
+ *first + npage * PAGE_SIZE == *last &&
+ !unaligned) {
npage++;
continue;
}
@@ -151,14 +161,14 @@ static int page_array_pin(struct page_array *pa, struct vfio_device *vdev)
return ret;
err_out:
- page_array_unpin(pa, vdev, pinned);
+ page_array_unpin(pa, vdev, pinned, unaligned);
return ret;
}
/* Unpin the pages before releasing the memory. */
-static void page_array_unpin_free(struct page_array *pa, struct vfio_device *vdev)
+static void page_array_unpin_free(struct page_array *pa, struct vfio_device *vdev, bool unaligned)
{
- page_array_unpin(pa, vdev, pa->pa_nr);
+ page_array_unpin(pa, vdev, pa->pa_nr, unaligned);
kfree(pa->pa_page);
kfree(pa->pa_iova);
}
@@ -640,7 +650,7 @@ static int ccwchain_fetch_ccw(struct ccw1 *ccw,
}
if (ccw_does_data_transfer(ccw)) {
- ret = page_array_pin(pa, vdev);
+ ret = page_array_pin(pa, vdev, idal_is_2k(cp));
if (ret < 0)
goto out_unpin;
} else {
@@ -656,7 +666,7 @@ static int ccwchain_fetch_ccw(struct ccw1 *ccw,
return 0;
out_unpin:
- page_array_unpin_free(pa, vdev);
+ page_array_unpin_free(pa, vdev, idal_is_2k(cp));
out_free_idaws:
kfree(idaws);
out_init:
@@ -754,7 +764,7 @@ void cp_free(struct channel_program *cp)
cp->initialized = false;
list_for_each_entry_safe(chain, temp, &cp->ccwchain_list, next) {
for (i = 0; i < chain->ch_len; i++) {
- page_array_unpin_free(&chain->ch_pa[i], vdev);
+ page_array_unpin_free(&chain->ch_pa[i], vdev, idal_is_2k(cp));
ccwchain_cda_free(chain, i);
}
ccwchain_free(chain);
--
2.34.1
next prev parent reply other threads:[~2022-12-20 17:11 UTC|newest]
Thread overview: 20+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-12-20 17:09 [PATCH v2 00/16] vfio/ccw: channel program cleanup Eric Farman
2022-12-20 17:09 ` [PATCH v2 01/16] vfio/ccw: cleanup some of the mdev commentary Eric Farman
2022-12-20 17:09 ` [PATCH v2 02/16] vfio/ccw: simplify the cp_get_orb interface Eric Farman
2022-12-20 17:09 ` [PATCH v2 03/16] vfio/ccw: allow non-zero storage keys Eric Farman
2022-12-20 17:09 ` [PATCH v2 04/16] vfio/ccw: move where IDA flag is set in ORB Eric Farman
2022-12-20 17:09 ` [PATCH v2 05/16] vfio/ccw: replace copy_from_iova with vfio_dma_rw Eric Farman
2022-12-20 17:09 ` [PATCH v2 06/16] vfio/ccw: simplify CCW chain fetch routines Eric Farman
2022-12-20 17:09 ` [PATCH v2 07/16] vfio/ccw: remove unnecessary malloc alignment Eric Farman
2022-12-20 17:22 ` Matthew Rosato
2022-12-20 17:10 ` [PATCH v2 08/16] vfio/ccw: pass page count to page_array struct Eric Farman
2022-12-20 17:10 ` [PATCH v2 09/16] vfio/ccw: populate page_array struct inline Eric Farman
2022-12-20 17:10 ` [PATCH v2 10/16] vfio/ccw: refactor the idaw counter Eric Farman
2022-12-20 17:10 ` [PATCH v2 11/16] vfio/ccw: read only one Format-1 IDAW Eric Farman
2022-12-20 17:24 ` Matthew Rosato
2022-12-20 17:10 ` [PATCH v2 12/16] vfio/ccw: calculate number of IDAWs regardless of format Eric Farman
2022-12-20 17:10 ` [PATCH v2 13/16] vfio/ccw: allocate/populate the guest idal Eric Farman
2022-12-20 17:45 ` Matthew Rosato
2022-12-20 17:10 ` [PATCH v2 14/16] vfio/ccw: handle a guest Format-1 IDAL Eric Farman
2022-12-20 17:10 ` Eric Farman [this message]
2022-12-20 17:10 ` [PATCH v2 16/16] vfio/ccw: remove old IDA format restrictions Eric Farman
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20221220171008.1362680-16-farman@linux.ibm.com \
--to=farman@linux.ibm.com \
--cc=agordeev@linux.ibm.com \
--cc=borntraeger@linux.ibm.com \
--cc=gor@linux.ibm.com \
--cc=hca@linux.ibm.com \
--cc=kvm@vger.kernel.org \
--cc=linux-s390@vger.kernel.org \
--cc=mjrosato@linux.ibm.com \
--cc=oberpar@linux.ibm.com \
--cc=pasic@linux.ibm.com \
--cc=svens@linux.ibm.com \
--cc=vneethv@linux.ibm.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox