* [PATCH v2 1/7] net/9p: show error message if user 'msize' cannot be satisfied
2021-09-20 16:43 [PATCH v2 0/7] net/9p: remove msize limit in virtio transport Christian Schoenebeck
@ 2021-09-20 16:43 ` Christian Schoenebeck
2021-09-20 16:43 ` [PATCH v2 2/7] 9p/trans_virtio: separate allocation of scatter gather list Christian Schoenebeck
` (5 subsequent siblings)
6 siblings, 0 replies; 11+ messages in thread
From: Christian Schoenebeck @ 2021-09-20 16:43 UTC (permalink / raw)
To: v9fs-developer
Cc: netdev, Dominique Martinet, Eric Van Hensbergen, Latchesar Ionkov,
Greg Kurz, Vivek Goyal
If user supplied a large value with the 'msize' option, then
client would silently limit that 'msize' value to the maximum
value supported by transport. That's a bit confusing for users,
as they get no indication why their preferred 'msize' value
could not be satisfied.
Reported-by: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Christian Schoenebeck <linux_oss@crudebyte.com>
---
net/9p/client.c | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/net/9p/client.c b/net/9p/client.c
index 213f12ed76cd..4f4fd2098a30 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -1044,8 +1044,13 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
if (err)
goto put_trans;
- if (clnt->msize > clnt->trans_mod->maxsize)
+ if (clnt->msize > clnt->trans_mod->maxsize) {
clnt->msize = clnt->trans_mod->maxsize;
+ pr_info("Limiting 'msize' to %d as this is the maximum "
+ "supported by transport %s\n",
+ clnt->msize, clnt->trans_mod->name
+ );
+ }
if (clnt->msize < 4096) {
p9_debug(P9_DEBUG_ERROR,
--
2.20.1
^ permalink raw reply related [flat|nested] 11+ messages in thread* [PATCH v2 2/7] 9p/trans_virtio: separate allocation of scatter gather list
2021-09-20 16:43 [PATCH v2 0/7] net/9p: remove msize limit in virtio transport Christian Schoenebeck
2021-09-20 16:43 ` [PATCH v2 1/7] net/9p: show error message if user 'msize' cannot be satisfied Christian Schoenebeck
@ 2021-09-20 16:43 ` Christian Schoenebeck
2021-09-20 16:43 ` [PATCH v2 3/7] 9p/trans_virtio: turn amount of sg lists into runtime info Christian Schoenebeck
` (4 subsequent siblings)
6 siblings, 0 replies; 11+ messages in thread
From: Christian Schoenebeck @ 2021-09-20 16:43 UTC (permalink / raw)
To: v9fs-developer
Cc: netdev, Dominique Martinet, Eric Van Hensbergen, Latchesar Ionkov,
Greg Kurz, Vivek Goyal
The scatter gather list in struct virtio_chan currently
resides as compile-time constant size array directly within the
contiguous struct virtio_chan's memory space.
Separate memory space and allocation of the scatter gather list
from memory space and allocation of struct virtio_chan.
Signed-off-by: Christian Schoenebeck <linux_oss@crudebyte.com>
---
net/9p/trans_virtio.c | 13 ++++++++++++-
1 file changed, 12 insertions(+), 1 deletion(-)
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index 490a4c900339..1dbe2e921bb8 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -77,7 +77,7 @@ struct virtio_chan {
*/
unsigned long p9_max_pages;
/* Scatterlist: can be too big for stack. */
- struct scatterlist sg[VIRTQUEUE_NUM];
+ struct scatterlist *sg;
/**
* @tag: name to identify a mount null terminated
*/
@@ -574,6 +574,14 @@ static int p9_virtio_probe(struct virtio_device *vdev)
goto fail;
}
+ chan->sg = kmalloc_array(VIRTQUEUE_NUM,
+ sizeof(struct scatterlist), GFP_KERNEL);
+ if (!chan->sg) {
+ pr_err("Failed to allocate virtio 9P channel\n");
+ err = -ENOMEM;
+ goto out_free_chan_shallow;
+ }
+
chan->vdev = vdev;
/* We expect one virtqueue, for requests. */
@@ -635,6 +643,8 @@ static int p9_virtio_probe(struct virtio_device *vdev)
out_free_vq:
vdev->config->del_vqs(vdev);
out_free_chan:
+ kfree(chan->sg);
+out_free_chan_shallow:
kfree(chan);
fail:
return err;
@@ -728,6 +738,7 @@ static void p9_virtio_remove(struct virtio_device *vdev)
kobject_uevent(&(vdev->dev.kobj), KOBJ_CHANGE);
kfree(chan->tag);
kfree(chan->vc_wq);
+ kfree(chan->sg);
kfree(chan);
}
--
2.20.1
^ permalink raw reply related [flat|nested] 11+ messages in thread* [PATCH v2 3/7] 9p/trans_virtio: turn amount of sg lists into runtime info
2021-09-20 16:43 [PATCH v2 0/7] net/9p: remove msize limit in virtio transport Christian Schoenebeck
2021-09-20 16:43 ` [PATCH v2 1/7] net/9p: show error message if user 'msize' cannot be satisfied Christian Schoenebeck
2021-09-20 16:43 ` [PATCH v2 2/7] 9p/trans_virtio: separate allocation of scatter gather list Christian Schoenebeck
@ 2021-09-20 16:43 ` Christian Schoenebeck
2021-09-20 16:44 ` [PATCH v2 4/7] 9p/trans_virtio: introduce struct virtqueue_sg Christian Schoenebeck
` (3 subsequent siblings)
6 siblings, 0 replies; 11+ messages in thread
From: Christian Schoenebeck @ 2021-09-20 16:43 UTC (permalink / raw)
To: v9fs-developer
Cc: netdev, Dominique Martinet, Eric Van Hensbergen, Latchesar Ionkov,
Greg Kurz, Vivek Goyal
The size of scatter/gather lists used by the virtio transport is
currently hard coded. Refactor this to using a runtime variable.
Signed-off-by: Christian Schoenebeck <linux_oss@crudebyte.com>
---
net/9p/trans_virtio.c | 23 +++++++++++++----------
1 file changed, 13 insertions(+), 10 deletions(-)
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index 1dbe2e921bb8..3347d35a5e6e 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -36,7 +36,7 @@
#include <linux/virtio_9p.h>
#include "trans_common.h"
-#define VIRTQUEUE_NUM 128
+#define VIRTQUEUE_DEFAULT_NUM 128
/* a single mutex to manage channel initialization and attachment */
static DEFINE_MUTEX(virtio_9p_lock);
@@ -54,6 +54,7 @@ static atomic_t vp_pinned = ATOMIC_INIT(0);
* @vc_wq: wait queue for waiting for thing to be added to ring buf
* @p9_max_pages: maximum number of pinned pages
* @sg: scatter gather list which is used to pack a request (protected?)
+ * @sg_n: amount of elements in sg array
* @chan_list: linked list of channels
*
* We keep all per-channel information in a structure.
@@ -78,6 +79,7 @@ struct virtio_chan {
unsigned long p9_max_pages;
/* Scatterlist: can be too big for stack. */
struct scatterlist *sg;
+ size_t sg_n;
/**
* @tag: name to identify a mount null terminated
*/
@@ -270,12 +272,12 @@ p9_virtio_request(struct p9_client *client, struct p9_req_t *req)
out_sgs = in_sgs = 0;
/* Handle out VirtIO ring buffers */
out = pack_sg_list(chan->sg, 0,
- VIRTQUEUE_NUM, req->tc.sdata, req->tc.size);
+ chan->sg_n, req->tc.sdata, req->tc.size);
if (out)
sgs[out_sgs++] = chan->sg;
in = pack_sg_list(chan->sg, out,
- VIRTQUEUE_NUM, req->rc.sdata, req->rc.capacity);
+ chan->sg_n, req->rc.sdata, req->rc.capacity);
if (in)
sgs[out_sgs + in_sgs++] = chan->sg + out;
@@ -447,14 +449,14 @@ p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
/* out data */
out = pack_sg_list(chan->sg, 0,
- VIRTQUEUE_NUM, req->tc.sdata, req->tc.size);
+ chan->sg_n, req->tc.sdata, req->tc.size);
if (out)
sgs[out_sgs++] = chan->sg;
if (out_pages) {
sgs[out_sgs++] = chan->sg + out;
- out += pack_sg_list_p(chan->sg, out, VIRTQUEUE_NUM,
+ out += pack_sg_list_p(chan->sg, out, chan->sg_n,
out_pages, out_nr_pages, offs, outlen);
}
@@ -466,13 +468,13 @@ p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
* allocated memory and payload onto the user buffer.
*/
in = pack_sg_list(chan->sg, out,
- VIRTQUEUE_NUM, req->rc.sdata, in_hdr_len);
+ chan->sg_n, req->rc.sdata, in_hdr_len);
if (in)
sgs[out_sgs + in_sgs++] = chan->sg + out;
if (in_pages) {
sgs[out_sgs + in_sgs++] = chan->sg + out + in;
- in += pack_sg_list_p(chan->sg, out + in, VIRTQUEUE_NUM,
+ in += pack_sg_list_p(chan->sg, out + in, chan->sg_n,
in_pages, in_nr_pages, offs, inlen);
}
@@ -574,13 +576,14 @@ static int p9_virtio_probe(struct virtio_device *vdev)
goto fail;
}
- chan->sg = kmalloc_array(VIRTQUEUE_NUM,
+ chan->sg = kmalloc_array(VIRTQUEUE_DEFAULT_NUM,
sizeof(struct scatterlist), GFP_KERNEL);
if (!chan->sg) {
pr_err("Failed to allocate virtio 9P channel\n");
err = -ENOMEM;
goto out_free_chan_shallow;
}
+ chan->sg_n = VIRTQUEUE_DEFAULT_NUM;
chan->vdev = vdev;
@@ -593,7 +596,7 @@ static int p9_virtio_probe(struct virtio_device *vdev)
chan->vq->vdev->priv = chan;
spin_lock_init(&chan->lock);
- sg_init_table(chan->sg, VIRTQUEUE_NUM);
+ sg_init_table(chan->sg, chan->sg_n);
chan->inuse = false;
if (virtio_has_feature(vdev, VIRTIO_9P_MOUNT_TAG)) {
@@ -777,7 +780,7 @@ static struct p9_trans_module p9_virtio_trans = {
* that are not at page boundary, that can result in an extra
* page in zero copy.
*/
- .maxsize = PAGE_SIZE * (VIRTQUEUE_NUM - 3),
+ .maxsize = PAGE_SIZE * (VIRTQUEUE_DEFAULT_NUM - 3),
.def = 1,
.owner = THIS_MODULE,
};
--
2.20.1
^ permalink raw reply related [flat|nested] 11+ messages in thread* [PATCH v2 4/7] 9p/trans_virtio: introduce struct virtqueue_sg
2021-09-20 16:43 [PATCH v2 0/7] net/9p: remove msize limit in virtio transport Christian Schoenebeck
` (2 preceding siblings ...)
2021-09-20 16:43 ` [PATCH v2 3/7] 9p/trans_virtio: turn amount of sg lists into runtime info Christian Schoenebeck
@ 2021-09-20 16:44 ` Christian Schoenebeck
2021-09-22 11:25 ` Christian Schoenebeck
2021-09-20 16:44 ` [PATCH v2 5/7] net/9p: add trans_maxsize to struct p9_client Christian Schoenebeck
` (2 subsequent siblings)
6 siblings, 1 reply; 11+ messages in thread
From: Christian Schoenebeck @ 2021-09-20 16:44 UTC (permalink / raw)
To: v9fs-developer
Cc: netdev, Dominique Martinet, Eric Van Hensbergen, Latchesar Ionkov,
Greg Kurz, Vivek Goyal
The amount of elements in a scatter/gather list is limited to
approximately 128 elements. To allow going beyond that limit
with subsequent patches, pave the way by turning the one-
dimensional sg list array into a two-dimensional array, i.e:
sg[128]
becomes
sgl[nsgl][SG_MAX_SINGLE_ALLOC]
As the value of 'nsgl' is exactly (still) 1 in this commit
and the compile-time (compiler and architecture dependent)
value of 'SG_MAX_SINGLE_ALLOC' equals approximately the
previous hard coded 128 elements, this commit is therefore
more of a preparatory refactoring than actual behaviour
change.
A custom struct virtqueue_sg is defined instead of using
shared API struct sg_table, because the latter would not
allow to resize the table after allocation. sg_append_table
API OTOH would not fit either, because it requires a list
of pages beforehand upon allocation. And both APIs only
support all-or-nothing allocation.
Signed-off-by: Christian Schoenebeck <linux_oss@crudebyte.com>
---
net/9p/trans_virtio.c | 190 ++++++++++++++++++++++++++++++++----------
1 file changed, 144 insertions(+), 46 deletions(-)
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index 3347d35a5e6e..0db8de84bd51 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -36,7 +36,31 @@
#include <linux/virtio_9p.h>
#include "trans_common.h"
-#define VIRTQUEUE_DEFAULT_NUM 128
+/**
+ * struct virtqueue_sg - (chained) scatter gather lists for virtqueue data
+ * transmission
+ * @nsgl: amount of elements (in first dimension) of array field @sgl
+ * @sgl: two-dimensional array, i.e. sgl[nsgl][SG_MAX_SINGLE_ALLOC]
+ */
+struct virtqueue_sg {
+ unsigned int nsgl;
+ struct scatterlist *sgl[];
+};
+
+/*
+ * Default value for field nsgl in struct virtqueue_sg, which defines the
+ * initial virtio data transmission capacity when this virtio transport is
+ * probed.
+ */
+#define VIRTQUEUE_SG_NSGL_DEFAULT 1
+
+/* maximum value for field nsgl in struct virtqueue_sg */
+#define VIRTQUEUE_SG_NSGL_MAX \
+ ((PAGE_SIZE - sizeof(struct virtqueue_sg)) / \
+ sizeof(struct scatterlist *)) \
+
+/* last entry per sg list is used for chaining (pointer to next list) */
+#define SG_USER_PAGES_PER_LIST (SG_MAX_SINGLE_ALLOC - 1)
/* a single mutex to manage channel initialization and attachment */
static DEFINE_MUTEX(virtio_9p_lock);
@@ -53,8 +77,7 @@ static atomic_t vp_pinned = ATOMIC_INIT(0);
* @ring_bufs_avail: flag to indicate there is some available in the ring buf
* @vc_wq: wait queue for waiting for thing to be added to ring buf
* @p9_max_pages: maximum number of pinned pages
- * @sg: scatter gather list which is used to pack a request (protected?)
- * @sg_n: amount of elements in sg array
+ * @vq_sg: table of scatter gather lists, which are used to pack a request
* @chan_list: linked list of channels
*
* We keep all per-channel information in a structure.
@@ -77,9 +100,7 @@ struct virtio_chan {
* will be placing it in each channel.
*/
unsigned long p9_max_pages;
- /* Scatterlist: can be too big for stack. */
- struct scatterlist *sg;
- size_t sg_n;
+ struct virtqueue_sg *vq_sg;
/**
* @tag: name to identify a mount null terminated
*/
@@ -96,6 +117,89 @@ static unsigned int rest_of_page(void *data)
return PAGE_SIZE - offset_in_page(data);
}
+/**
+ * vq_sg_page - returns user page for given page index
+ * @vq_sg: scatter gather lists used by this transport
+ * @page: user page index across all scatter gather lists
+ */
+static struct scatterlist *vq_sg_page(struct virtqueue_sg *vq_sg, size_t page)
+{
+ unsigned int node = page / SG_USER_PAGES_PER_LIST;
+ unsigned int leaf = page % SG_USER_PAGES_PER_LIST;
+ BUG_ON(node >= VIRTQUEUE_SG_NSGL_MAX);
+ return &vq_sg->sgl[node][leaf];
+}
+
+/**
+ * vq_sg_npages - returns total number of individual user pages in passed
+ * scatter gather lists
+ * @vq_sg: scatter gather lists to be counted
+ */
+static size_t vq_sg_npages(struct virtqueue_sg *vq_sg)
+{
+ return vq_sg->nsgl * SG_USER_PAGES_PER_LIST;
+}
+
+/**
+ * vq_sg_free - free all memory previously allocated for @vq_sg
+ * @vq_sg: scatter gather lists to be freed
+ */
+static void vq_sg_free(struct virtqueue_sg *vq_sg)
+{
+ unsigned int i;
+
+ for (i = 0; i < vq_sg->nsgl; ++i) {
+ kfree(vq_sg->sgl[i]);
+ }
+ kfree(vq_sg);
+}
+
+/**
+ * vq_sg_alloc - allocates and returns @nsgl scatter gather lists
+ * @nsgl: amount of scatter gather lists to be allocated
+ * If @nsgl is larger than one then chained lists are used if supported by
+ * architecture.
+ */
+static struct virtqueue_sg *vq_sg_alloc(unsigned int nsgl)
+{
+ struct virtqueue_sg *vq_sg;
+ unsigned int i;
+
+ BUG_ON(!nsgl || nsgl > VIRTQUEUE_SG_NSGL_MAX);
+#ifdef CONFIG_ARCH_NO_SG_CHAIN
+ if (WARN_ON_ONCE(nsgl > 1))
+ return NULL;
+#endif
+
+ vq_sg = kzalloc(sizeof(struct virtqueue_sg) +
+ nsgl * sizeof(struct scatterlist *),
+ GFP_KERNEL);
+
+ if (!vq_sg)
+ return NULL;
+
+ vq_sg->nsgl = nsgl;
+
+ for (i = 0; i < nsgl; ++i) {
+ vq_sg->sgl[i] = kmalloc_array(
+ SG_MAX_SINGLE_ALLOC, sizeof(struct scatterlist),
+ GFP_KERNEL
+ );
+ if (!vq_sg->sgl[i]) {
+ vq_sg_free(vq_sg);
+ return NULL;
+ }
+ sg_init_table(vq_sg->sgl[i], SG_MAX_SINGLE_ALLOC);
+ if (i) {
+ /* chain the lists */
+ sg_chain(vq_sg->sgl[i - 1], SG_MAX_SINGLE_ALLOC,
+ vq_sg->sgl[i]);
+ }
+ }
+ sg_mark_end(&vq_sg->sgl[nsgl - 1][SG_MAX_SINGLE_ALLOC - 1]);
+ return vq_sg;
+}
+
/**
* p9_virtio_close - reclaim resources of a channel
* @client: client instance
@@ -158,9 +262,8 @@ static void req_done(struct virtqueue *vq)
/**
* pack_sg_list - pack a scatter gather list from a linear buffer
- * @sg: scatter/gather list to pack into
+ * @vq_sg: scatter/gather lists to pack into
* @start: which segment of the sg_list to start at
- * @limit: maximum segment to pack data to
* @data: data to pack into scatter/gather list
* @count: amount of data to pack into the scatter/gather list
*
@@ -170,11 +273,12 @@ static void req_done(struct virtqueue *vq)
*
*/
-static int pack_sg_list(struct scatterlist *sg, int start,
- int limit, char *data, int count)
+static int pack_sg_list(struct virtqueue_sg *vq_sg, int start,
+ char *data, int count)
{
int s;
int index = start;
+ size_t limit = vq_sg_npages(vq_sg);
while (count) {
s = rest_of_page(data);
@@ -182,13 +286,13 @@ static int pack_sg_list(struct scatterlist *sg, int start,
s = count;
BUG_ON(index >= limit);
/* Make sure we don't terminate early. */
- sg_unmark_end(&sg[index]);
- sg_set_buf(&sg[index++], data, s);
+ sg_unmark_end(vq_sg_page(vq_sg, index));
+ sg_set_buf(vq_sg_page(vq_sg, index++), data, s);
count -= s;
data += s;
}
if (index-start)
- sg_mark_end(&sg[index - 1]);
+ sg_mark_end(vq_sg_page(vq_sg, index - 1));
return index-start;
}
@@ -208,21 +312,21 @@ static int p9_virtio_cancelled(struct p9_client *client, struct p9_req_t *req)
/**
* pack_sg_list_p - Just like pack_sg_list. Instead of taking a buffer,
* this takes a list of pages.
- * @sg: scatter/gather list to pack into
+ * @vq_sg: scatter/gather lists to pack into
* @start: which segment of the sg_list to start at
- * @limit: maximum number of pages in sg list.
* @pdata: a list of pages to add into sg.
* @nr_pages: number of pages to pack into the scatter/gather list
* @offs: amount of data in the beginning of first page _not_ to pack
* @count: amount of data to pack into the scatter/gather list
*/
static int
-pack_sg_list_p(struct scatterlist *sg, int start, int limit,
+pack_sg_list_p(struct virtqueue_sg *vq_sg, int start,
struct page **pdata, int nr_pages, size_t offs, int count)
{
int i = 0, s;
int data_off = offs;
int index = start;
+ size_t limit = vq_sg_npages(vq_sg);
BUG_ON(nr_pages > (limit - start));
/*
@@ -235,15 +339,16 @@ pack_sg_list_p(struct scatterlist *sg, int start, int limit,
s = count;
BUG_ON(index >= limit);
/* Make sure we don't terminate early. */
- sg_unmark_end(&sg[index]);
- sg_set_page(&sg[index++], pdata[i++], s, data_off);
+ sg_unmark_end(vq_sg_page(vq_sg, index));
+ sg_set_page(vq_sg_page(vq_sg, index++), pdata[i++], s,
+ data_off);
data_off = 0;
count -= s;
nr_pages--;
}
if (index-start)
- sg_mark_end(&sg[index - 1]);
+ sg_mark_end(vq_sg_page(vq_sg, index - 1));
return index - start;
}
@@ -271,15 +376,13 @@ p9_virtio_request(struct p9_client *client, struct p9_req_t *req)
out_sgs = in_sgs = 0;
/* Handle out VirtIO ring buffers */
- out = pack_sg_list(chan->sg, 0,
- chan->sg_n, req->tc.sdata, req->tc.size);
+ out = pack_sg_list(chan->vq_sg, 0, req->tc.sdata, req->tc.size);
if (out)
- sgs[out_sgs++] = chan->sg;
+ sgs[out_sgs++] = vq_sg_page(chan->vq_sg, 0);
- in = pack_sg_list(chan->sg, out,
- chan->sg_n, req->rc.sdata, req->rc.capacity);
+ in = pack_sg_list(chan->vq_sg, out, req->rc.sdata, req->rc.capacity);
if (in)
- sgs[out_sgs + in_sgs++] = chan->sg + out;
+ sgs[out_sgs + in_sgs++] = vq_sg_page(chan->vq_sg, out);
err = virtqueue_add_sgs(chan->vq, sgs, out_sgs, in_sgs, req,
GFP_ATOMIC);
@@ -448,16 +551,15 @@ p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
out_sgs = in_sgs = 0;
/* out data */
- out = pack_sg_list(chan->sg, 0,
- chan->sg_n, req->tc.sdata, req->tc.size);
+ out = pack_sg_list(chan->vq_sg, 0, req->tc.sdata, req->tc.size);
if (out)
- sgs[out_sgs++] = chan->sg;
+ sgs[out_sgs++] = vq_sg_page(chan->vq_sg, 0);
if (out_pages) {
- sgs[out_sgs++] = chan->sg + out;
- out += pack_sg_list_p(chan->sg, out, chan->sg_n,
- out_pages, out_nr_pages, offs, outlen);
+ sgs[out_sgs++] = vq_sg_page(chan->vq_sg, out);
+ out += pack_sg_list_p(chan->vq_sg, out, out_pages,
+ out_nr_pages, offs, outlen);
}
/*
@@ -467,15 +569,14 @@ p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
* Arrange in such a way that server places header in the
* allocated memory and payload onto the user buffer.
*/
- in = pack_sg_list(chan->sg, out,
- chan->sg_n, req->rc.sdata, in_hdr_len);
+ in = pack_sg_list(chan->vq_sg, out, req->rc.sdata, in_hdr_len);
if (in)
- sgs[out_sgs + in_sgs++] = chan->sg + out;
+ sgs[out_sgs + in_sgs++] = vq_sg_page(chan->vq_sg, out);
if (in_pages) {
- sgs[out_sgs + in_sgs++] = chan->sg + out + in;
- in += pack_sg_list_p(chan->sg, out + in, chan->sg_n,
- in_pages, in_nr_pages, offs, inlen);
+ sgs[out_sgs + in_sgs++] = vq_sg_page(chan->vq_sg, out + in);
+ in += pack_sg_list_p(chan->vq_sg, out + in, in_pages,
+ in_nr_pages, offs, inlen);
}
BUG_ON(out_sgs + in_sgs > ARRAY_SIZE(sgs));
@@ -576,14 +677,12 @@ static int p9_virtio_probe(struct virtio_device *vdev)
goto fail;
}
- chan->sg = kmalloc_array(VIRTQUEUE_DEFAULT_NUM,
- sizeof(struct scatterlist), GFP_KERNEL);
- if (!chan->sg) {
+ chan->vq_sg = vq_sg_alloc(VIRTQUEUE_SG_NSGL_DEFAULT);
+ if (!chan->vq_sg) {
pr_err("Failed to allocate virtio 9P channel\n");
err = -ENOMEM;
goto out_free_chan_shallow;
}
- chan->sg_n = VIRTQUEUE_DEFAULT_NUM;
chan->vdev = vdev;
@@ -596,8 +695,6 @@ static int p9_virtio_probe(struct virtio_device *vdev)
chan->vq->vdev->priv = chan;
spin_lock_init(&chan->lock);
- sg_init_table(chan->sg, chan->sg_n);
-
chan->inuse = false;
if (virtio_has_feature(vdev, VIRTIO_9P_MOUNT_TAG)) {
virtio_cread(vdev, struct virtio_9p_config, tag_len, &tag_len);
@@ -646,7 +743,7 @@ static int p9_virtio_probe(struct virtio_device *vdev)
out_free_vq:
vdev->config->del_vqs(vdev);
out_free_chan:
- kfree(chan->sg);
+ vq_sg_free(chan->vq_sg);
out_free_chan_shallow:
kfree(chan);
fail:
@@ -741,7 +838,7 @@ static void p9_virtio_remove(struct virtio_device *vdev)
kobject_uevent(&(vdev->dev.kobj), KOBJ_CHANGE);
kfree(chan->tag);
kfree(chan->vc_wq);
- kfree(chan->sg);
+ vq_sg_free(chan->vq_sg);
kfree(chan);
}
@@ -780,7 +877,8 @@ static struct p9_trans_module p9_virtio_trans = {
* that are not at page boundary, that can result in an extra
* page in zero copy.
*/
- .maxsize = PAGE_SIZE * (VIRTQUEUE_DEFAULT_NUM - 3),
+ .maxsize = PAGE_SIZE *
+ ((VIRTQUEUE_SG_NSGL_DEFAULT * SG_USER_PAGES_PER_LIST) - 3),
.def = 1,
.owner = THIS_MODULE,
};
--
2.20.1
^ permalink raw reply related [flat|nested] 11+ messages in thread* Re: [PATCH v2 4/7] 9p/trans_virtio: introduce struct virtqueue_sg
2021-09-20 16:44 ` [PATCH v2 4/7] 9p/trans_virtio: introduce struct virtqueue_sg Christian Schoenebeck
@ 2021-09-22 11:25 ` Christian Schoenebeck
0 siblings, 0 replies; 11+ messages in thread
From: Christian Schoenebeck @ 2021-09-22 11:25 UTC (permalink / raw)
To: v9fs-developer
Cc: netdev, Dominique Martinet, Eric Van Hensbergen, Latchesar Ionkov,
Greg Kurz, Vivek Goyal
On Montag, 20. September 2021 18:44:00 CEST Christian Schoenebeck wrote:
> The amount of elements in a scatter/gather list is limited to
> approximately 128 elements. To allow going beyond that limit
> with subsequent patches, pave the way by turning the one-
> dimensional sg list array into a two-dimensional array, i.e:
>
> sg[128]
>
> becomes
>
> sgl[nsgl][SG_MAX_SINGLE_ALLOC]
>
> As the value of 'nsgl' is exactly (still) 1 in this commit
> and the compile-time (compiler and architecture dependent)
> value of 'SG_MAX_SINGLE_ALLOC' equals approximately the
> previous hard coded 128 elements, this commit is therefore
> more of a preparatory refactoring then actual behaviour
> change.
>
> A custom struct virtqueue_sg is defined instead of using
> shared API struct sg_table, because the latter would not
> allow to resize the table after allocation. sg_append_table
> API OTOH would not fit either, because it requires a list
> of pages beforehand upon allocation. And both APIs only
> support all-or-nothing allocation.
>
> Signed-off-by: Christian Schoenebeck <linux_oss@crudebyte.com>
> ---
> net/9p/trans_virtio.c | 190 ++++++++++++++++++++++++++++++++----------
> 1 file changed, 144 insertions(+), 46 deletions(-)
>
> diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
> index 3347d35a5e6e..0db8de84bd51 100644
> --- a/net/9p/trans_virtio.c
> +++ b/net/9p/trans_virtio.c
> @@ -36,7 +36,31 @@
> #include <linux/virtio_9p.h>
> #include "trans_common.h"
>
> -#define VIRTQUEUE_DEFAULT_NUM 128
> +/**
> + * struct virtqueue_sg - (chained) scatter gather lists for virtqueue data
> + * transmission
> + * @nsgl: amount of elements (in first dimension) of array field @sgl
> + * @sgl: two-dimensional array, i.e. sgl[nsgl][SG_MAX_SINGLE_ALLOC]
> + */
> +struct virtqueue_sg {
> + unsigned int nsgl;
> + struct scatterlist *sgl[];
> +};
> +
> +/*
> + * Default value for field nsgl in struct virtqueue_sg, which defines the
> + * initial virtio data transmission capacity when this virtio transport is
> + * probed.
> + */
> +#define VIRTQUEUE_SG_NSGL_DEFAULT 1
> +
> +/* maximum value for field nsgl in struct virtqueue_sg */
> +#define VIRTQUEUE_SG_NSGL_MAX \
> + ((PAGE_SIZE - sizeof(struct virtqueue_sg)) / \
> + sizeof(struct scatterlist *)) \
> +
> +/* last entry per sg list is used for chaining (pointer to next list) */
> +#define SG_USER_PAGES_PER_LIST (SG_MAX_SINGLE_ALLOC - 1)
>
> /* a single mutex to manage channel initialization and attachment */
> static DEFINE_MUTEX(virtio_9p_lock);
> @@ -53,8 +77,7 @@ static atomic_t vp_pinned = ATOMIC_INIT(0);
> * @ring_bufs_avail: flag to indicate there is some available in the ring
> buf * @vc_wq: wait queue for waiting for thing to be added to ring buf *
> @p9_max_pages: maximum number of pinned pages
> - * @sg: scatter gather list which is used to pack a request (protected?)
> - * @sg_n: amount of elements in sg array
> + * @vq_sg: table of scatter gather lists, which are used to pack a request
> * @chan_list: linked list of channels
> *
> * We keep all per-channel information in a structure.
> @@ -77,9 +100,7 @@ struct virtio_chan {
> * will be placing it in each channel.
> */
> unsigned long p9_max_pages;
> - /* Scatterlist: can be too big for stack. */
> - struct scatterlist *sg;
> - size_t sg_n;
> + struct virtqueue_sg *vq_sg;
> /**
> * @tag: name to identify a mount null terminated
> */
> @@ -96,6 +117,89 @@ static unsigned int rest_of_page(void *data)
> return PAGE_SIZE - offset_in_page(data);
> }
>
> +/**
> + * vq_sg_page - returns user page for given page index
> + * @vq_sg: scatter gather lists used by this transport
> + * @page: user page index across all scatter gather lists
> + */
> +static struct scatterlist *vq_sg_page(struct virtqueue_sg *vq_sg, size_t
> page) +{
> + unsigned int node = page / SG_USER_PAGES_PER_LIST;
> + unsigned int leaf = page % SG_USER_PAGES_PER_LIST;
> + BUG_ON(node >= VIRTQUEUE_SG_NSGL_MAX);
> + return &vq_sg->sgl[node][leaf];
> +}
> +
> +/**
> + * vq_sg_npages - returns total number of individual user pages in passed
> + * scatter gather lists
> + * @vq_sg: scatter gather lists to be counted
> + */
> +static size_t vq_sg_npages(struct virtqueue_sg *vq_sg)
> +{
> + return vq_sg->nsgl * SG_USER_PAGES_PER_LIST;
> +}
> +
> +/**
> + * vq_sg_free - free all memory previously allocated for @vq_sg
> + * @vq_sg: scatter gather lists to be freed
> + */
> +static void vq_sg_free(struct virtqueue_sg *vq_sg)
> +{
> + unsigned int i;
> +
Here should be a
if (!vq_sg) return;
Not a real issue right now as vq_sg_free() is never called with NULL anywhere,
but it is a common expectation that free functions should safely handle NULL
arguments.
> + for (i = 0; i < vq_sg->nsgl; ++i) {
> + kfree(vq_sg->sgl[i]);
> + }
> + kfree(vq_sg);
> +}
> +
> +/**
> + * vq_sg_alloc - allocates and returns @nsgl scatter gather lists
> + * @nsgl: amount of scatter gather lists to be allocated
> + * If @nsgl is larger than one then chained lists are used if supported by
> + * architecture.
> + */
> +static struct virtqueue_sg *vq_sg_alloc(unsigned int nsgl)
> +{
> + struct virtqueue_sg *vq_sg;
> + unsigned int i;
> +
> + BUG_ON(!nsgl || nsgl > VIRTQUEUE_SG_NSGL_MAX);
> +#ifdef CONFIG_ARCH_NO_SG_CHAIN
> + if (WARN_ON_ONCE(nsgl > 1))
> + return NULL;
> +#endif
> +
> + vq_sg = kzalloc(sizeof(struct virtqueue_sg) +
> + nsgl * sizeof(struct scatterlist *),
> + GFP_KERNEL);
> +
> + if (!vq_sg)
> + return NULL;
> +
> + vq_sg->nsgl = nsgl;
> +
> + for (i = 0; i < nsgl; ++i) {
> + vq_sg->sgl[i] = kmalloc_array(
> + SG_MAX_SINGLE_ALLOC, sizeof(struct scatterlist),
> + GFP_KERNEL
> + );
> + if (!vq_sg->sgl[i]) {
> + vq_sg_free(vq_sg);
> + return NULL;
> + }
> + sg_init_table(vq_sg->sgl[i], SG_MAX_SINGLE_ALLOC);
> + if (i) {
> + /* chain the lists */
> + sg_chain(vq_sg->sgl[i - 1], SG_MAX_SINGLE_ALLOC,
> + vq_sg->sgl[i]);
> + }
> + }
> + sg_mark_end(&vq_sg->sgl[nsgl - 1][SG_MAX_SINGLE_ALLOC - 1]);
> + return vq_sg;
> +}
> +
> /**
> * p9_virtio_close - reclaim resources of a channel
> * @client: client instance
> @@ -158,9 +262,8 @@ static void req_done(struct virtqueue *vq)
>
> /**
> * pack_sg_list - pack a scatter gather list from a linear buffer
> - * @sg: scatter/gather list to pack into
> + * @vq_sg: scatter/gather lists to pack into
> * @start: which segment of the sg_list to start at
> - * @limit: maximum segment to pack data to
> * @data: data to pack into scatter/gather list
> * @count: amount of data to pack into the scatter/gather list
> *
> @@ -170,11 +273,12 @@ static void req_done(struct virtqueue *vq)
> *
> */
>
> -static int pack_sg_list(struct scatterlist *sg, int start,
> - int limit, char *data, int count)
> +static int pack_sg_list(struct virtqueue_sg *vq_sg, int start,
> + char *data, int count)
> {
> int s;
> int index = start;
> + size_t limit = vq_sg_npages(vq_sg);
>
> while (count) {
> s = rest_of_page(data);
> @@ -182,13 +286,13 @@ static int pack_sg_list(struct scatterlist *sg, int
> start, s = count;
> BUG_ON(index >= limit);
> /* Make sure we don't terminate early. */
> - sg_unmark_end(&sg[index]);
> - sg_set_buf(&sg[index++], data, s);
> + sg_unmark_end(vq_sg_page(vq_sg, index));
> + sg_set_buf(vq_sg_page(vq_sg, index++), data, s);
> count -= s;
> data += s;
> }
> if (index-start)
> - sg_mark_end(&sg[index - 1]);
> + sg_mark_end(vq_sg_page(vq_sg, index - 1));
> return index-start;
> }
>
> @@ -208,21 +312,21 @@ static int p9_virtio_cancelled(struct p9_client
> *client, struct p9_req_t *req) /**
> * pack_sg_list_p - Just like pack_sg_list. Instead of taking a buffer,
> * this takes a list of pages.
> - * @sg: scatter/gather list to pack into
> + * @vq_sg: scatter/gather lists to pack into
> * @start: which segment of the sg_list to start at
> - * @limit: maximum number of pages in sg list.
> * @pdata: a list of pages to add into sg.
> * @nr_pages: number of pages to pack into the scatter/gather list
> * @offs: amount of data in the beginning of first page _not_ to pack
> * @count: amount of data to pack into the scatter/gather list
> */
> static int
> -pack_sg_list_p(struct scatterlist *sg, int start, int limit,
> +pack_sg_list_p(struct virtqueue_sg *vq_sg, int start,
> struct page **pdata, int nr_pages, size_t offs, int count)
> {
> int i = 0, s;
> int data_off = offs;
> int index = start;
> + size_t limit = vq_sg_npages(vq_sg);
>
> BUG_ON(nr_pages > (limit - start));
> /*
> @@ -235,15 +339,16 @@ pack_sg_list_p(struct scatterlist *sg, int start, int
> limit, s = count;
> BUG_ON(index >= limit);
> /* Make sure we don't terminate early. */
> - sg_unmark_end(&sg[index]);
> - sg_set_page(&sg[index++], pdata[i++], s, data_off);
> + sg_unmark_end(vq_sg_page(vq_sg, index));
> + sg_set_page(vq_sg_page(vq_sg, index++), pdata[i++], s,
> + data_off);
> data_off = 0;
> count -= s;
> nr_pages--;
> }
>
> if (index-start)
> - sg_mark_end(&sg[index - 1]);
> + sg_mark_end(vq_sg_page(vq_sg, index - 1));
> return index - start;
> }
>
> @@ -271,15 +376,13 @@ p9_virtio_request(struct p9_client *client, struct
> p9_req_t *req)
>
> out_sgs = in_sgs = 0;
> /* Handle out VirtIO ring buffers */
> - out = pack_sg_list(chan->sg, 0,
> - chan->sg_n, req->tc.sdata, req->tc.size);
> + out = pack_sg_list(chan->vq_sg, 0, req->tc.sdata, req->tc.size);
> if (out)
> - sgs[out_sgs++] = chan->sg;
> + sgs[out_sgs++] = vq_sg_page(chan->vq_sg, 0);
>
> - in = pack_sg_list(chan->sg, out,
> - chan->sg_n, req->rc.sdata, req->rc.capacity);
> + in = pack_sg_list(chan->vq_sg, out, req->rc.sdata, req->rc.capacity);
> if (in)
> - sgs[out_sgs + in_sgs++] = chan->sg + out;
> + sgs[out_sgs + in_sgs++] = vq_sg_page(chan->vq_sg, out);
>
> err = virtqueue_add_sgs(chan->vq, sgs, out_sgs, in_sgs, req,
> GFP_ATOMIC);
> @@ -448,16 +551,15 @@ p9_virtio_zc_request(struct p9_client *client, struct
> p9_req_t *req, out_sgs = in_sgs = 0;
>
> /* out data */
> - out = pack_sg_list(chan->sg, 0,
> - chan->sg_n, req->tc.sdata, req->tc.size);
> + out = pack_sg_list(chan->vq_sg, 0, req->tc.sdata, req->tc.size);
>
> if (out)
> - sgs[out_sgs++] = chan->sg;
> + sgs[out_sgs++] = vq_sg_page(chan->vq_sg, 0);
>
> if (out_pages) {
> - sgs[out_sgs++] = chan->sg + out;
> - out += pack_sg_list_p(chan->sg, out, chan->sg_n,
> - out_pages, out_nr_pages, offs, outlen);
> + sgs[out_sgs++] = vq_sg_page(chan->vq_sg, out);
> + out += pack_sg_list_p(chan->vq_sg, out, out_pages,
> + out_nr_pages, offs, outlen);
> }
>
> /*
> @@ -467,15 +569,14 @@ p9_virtio_zc_request(struct p9_client *client, struct
> p9_req_t *req, * Arrange in such a way that server places header in the
> * allocated memory and payload onto the user buffer.
> */
> - in = pack_sg_list(chan->sg, out,
> - chan->sg_n, req->rc.sdata, in_hdr_len);
> + in = pack_sg_list(chan->vq_sg, out, req->rc.sdata, in_hdr_len);
> if (in)
> - sgs[out_sgs + in_sgs++] = chan->sg + out;
> + sgs[out_sgs + in_sgs++] = vq_sg_page(chan->vq_sg, out);
>
> if (in_pages) {
> - sgs[out_sgs + in_sgs++] = chan->sg + out + in;
> - in += pack_sg_list_p(chan->sg, out + in, chan->sg_n,
> - in_pages, in_nr_pages, offs, inlen);
> + sgs[out_sgs + in_sgs++] = vq_sg_page(chan->vq_sg, out + in);
> + in += pack_sg_list_p(chan->vq_sg, out + in, in_pages,
> + in_nr_pages, offs, inlen);
> }
>
> BUG_ON(out_sgs + in_sgs > ARRAY_SIZE(sgs));
> @@ -576,14 +677,12 @@ static int p9_virtio_probe(struct virtio_device *vdev)
> goto fail;
> }
>
> - chan->sg = kmalloc_array(VIRTQUEUE_DEFAULT_NUM,
> - sizeof(struct scatterlist), GFP_KERNEL);
> - if (!chan->sg) {
> + chan->vq_sg = vq_sg_alloc(VIRTQUEUE_SG_NSGL_DEFAULT);
> + if (!chan->vq_sg) {
> pr_err("Failed to allocate virtio 9P channel\n");
> err = -ENOMEM;
> goto out_free_chan_shallow;
> }
> - chan->sg_n = VIRTQUEUE_DEFAULT_NUM;
>
> chan->vdev = vdev;
>
> @@ -596,8 +695,6 @@ static int p9_virtio_probe(struct virtio_device *vdev)
> chan->vq->vdev->priv = chan;
> spin_lock_init(&chan->lock);
>
> - sg_init_table(chan->sg, chan->sg_n);
> -
> chan->inuse = false;
> if (virtio_has_feature(vdev, VIRTIO_9P_MOUNT_TAG)) {
> virtio_cread(vdev, struct virtio_9p_config, tag_len, &tag_len);
> @@ -646,7 +743,7 @@ static int p9_virtio_probe(struct virtio_device *vdev)
> out_free_vq:
> vdev->config->del_vqs(vdev);
> out_free_chan:
> - kfree(chan->sg);
> + vq_sg_free(chan->vq_sg);
> out_free_chan_shallow:
> kfree(chan);
> fail:
> @@ -741,7 +838,7 @@ static void p9_virtio_remove(struct virtio_device *vdev)
> kobject_uevent(&(vdev->dev.kobj), KOBJ_CHANGE);
> kfree(chan->tag);
> kfree(chan->vc_wq);
> - kfree(chan->sg);
> + vq_sg_free(chan->vq_sg);
> kfree(chan);
>
> }
> @@ -780,7 +877,8 @@ static struct p9_trans_module p9_virtio_trans = {
> * that are not at page boundary, that can result in an extra
> * page in zero copy.
> */
> - .maxsize = PAGE_SIZE * (VIRTQUEUE_DEFAULT_NUM - 3),
> + .maxsize = PAGE_SIZE *
> + ((VIRTQUEUE_SG_NSGL_DEFAULT * SG_USER_PAGES_PER_LIST) - 3),
> .def = 1,
> .owner = THIS_MODULE,
> };
^ permalink raw reply [flat|nested] 11+ messages in thread
* [PATCH v2 5/7] net/9p: add trans_maxsize to struct p9_client
2021-09-20 16:43 [PATCH v2 0/7] net/9p: remove msize limit in virtio transport Christian Schoenebeck
` (3 preceding siblings ...)
2021-09-20 16:44 ` [PATCH v2 4/7] 9p/trans_virtio: introduce struct virtqueue_sg Christian Schoenebeck
@ 2021-09-20 16:44 ` Christian Schoenebeck
2021-09-20 16:44 ` [PATCH v2 6/7] 9p/trans_virtio: support larger msize values Christian Schoenebeck
2021-09-20 16:44 ` [PATCH v2 7/7] 9p/trans_virtio: resize sg lists to whatever is possible Christian Schoenebeck
6 siblings, 0 replies; 11+ messages in thread
From: Christian Schoenebeck @ 2021-09-20 16:44 UTC (permalink / raw)
To: v9fs-developer
Cc: netdev, Dominique Martinet, Eric Van Hensbergen, Latchesar Ionkov,
Greg Kurz, Vivek Goyal
This new field 'trans_maxsize' optionally allows the transport
to update it to reflect the actual maximum msize supported by
the allocated transport channel.
Signed-off-by: Christian Schoenebeck <linux_oss@crudebyte.com>
---
include/net/9p/client.h | 2 ++
net/9p/client.c | 12 ++++++++++--
2 files changed, 12 insertions(+), 2 deletions(-)
diff --git a/include/net/9p/client.h b/include/net/9p/client.h
index e1c308d8d288..e48c4cdf9be0 100644
--- a/include/net/9p/client.h
+++ b/include/net/9p/client.h
@@ -89,6 +89,7 @@ struct p9_req_t {
* struct p9_client - per client instance state
* @lock: protect @fids and @reqs
* @msize: maximum data size negotiated by protocol
+ * @trans_maxsize: actual maximum msize supported by transport channel
* @proto_version: 9P protocol version to use
* @trans_mod: module API instantiated with this client
* @status: connection state
@@ -103,6 +104,7 @@ struct p9_req_t {
struct p9_client {
spinlock_t lock;
unsigned int msize;
+ unsigned int trans_maxsize;
unsigned char proto_version;
struct p9_trans_module *trans_mod;
enum p9_trans_status status;
diff --git a/net/9p/client.c b/net/9p/client.c
index 4f4fd2098a30..a75034fa249b 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -1037,6 +1037,14 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
goto free_client;
}
+ /*
+ * transport will get a chance to increase trans_maxsize (if
+ * necessary) and it may update trans_maxsize in create() function
+ * below accordingly to reflect the actual maximum size supported by
+ * the allocated transport channel
+ */
+ clnt->trans_maxsize = clnt->trans_mod->maxsize;
+
p9_debug(P9_DEBUG_MUX, "clnt %p trans %p msize %d protocol %d\n",
clnt, clnt->trans_mod, clnt->msize, clnt->proto_version);
@@ -1044,8 +1052,8 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
if (err)
goto put_trans;
- if (clnt->msize > clnt->trans_mod->maxsize) {
- clnt->msize = clnt->trans_mod->maxsize;
+ if (clnt->msize > clnt->trans_maxsize) {
+ clnt->msize = clnt->trans_maxsize;
pr_info("Limiting 'msize' to %d as this is the maximum "
"supported by transport %s\n",
clnt->msize, clnt->trans_mod->name
--
2.20.1
^ permalink raw reply related [flat|nested] 11+ messages in thread* [PATCH v2 6/7] 9p/trans_virtio: support larger msize values
2021-09-20 16:43 [PATCH v2 0/7] net/9p: remove msize limit in virtio transport Christian Schoenebeck
` (4 preceding siblings ...)
2021-09-20 16:44 ` [PATCH v2 5/7] net/9p: add trans_maxsize to struct p9_client Christian Schoenebeck
@ 2021-09-20 16:44 ` Christian Schoenebeck
2021-09-22 11:42 ` Christian Schoenebeck
2021-09-20 16:44 ` [PATCH v2 7/7] 9p/trans_virtio: resize sg lists to whatever is possible Christian Schoenebeck
6 siblings, 1 reply; 11+ messages in thread
From: Christian Schoenebeck @ 2021-09-20 16:44 UTC (permalink / raw)
To: v9fs-developer
Cc: netdev, Dominique Martinet, Eric Van Hensbergen, Latchesar Ionkov,
Greg Kurz, Vivek Goyal
The virtio transport supports by default a 9p 'msize' of up to
approximately 500 kB. This patch adds support for larger 'msize'
values by resizing the amount of scatter/gather lists if required.
Signed-off-by: Christian Schoenebeck <linux_oss@crudebyte.com>
---
net/9p/trans_virtio.c | 57 +++++++++++++++++++++++++++++++++++++++++++
1 file changed, 57 insertions(+)
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index 0db8de84bd51..4cb75f45aa15 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -200,6 +200,31 @@ static struct virtqueue_sg *vq_sg_alloc(unsigned int nsgl)
return vq_sg;
}
+/**
+ * vq_sg_resize - resize passed virtqueue scatter/gather lists to the passed
+ * amount of lists
+ * @_vq_sg: scatter/gather lists to be resized
+ * @nsgl: new amount of scatter/gather lists
+ */
+static int vq_sg_resize(struct virtqueue_sg **_vq_sg, unsigned int nsgl)
+{
+ struct virtqueue_sg *vq_sg;
+
+ BUG_ON(!_vq_sg || !nsgl);
+ vq_sg = *_vq_sg;
+ if (vq_sg->nsgl == nsgl)
+ return 0;
+
+ /* lazy resize implementation for now */
+ vq_sg = vq_sg_alloc(nsgl);
+ if (!vq_sg)
+ return -ENOMEM;
+
+ kfree(*_vq_sg);
+ *_vq_sg = vq_sg;
+ return 0;
+}
+
/**
* p9_virtio_close - reclaim resources of a channel
* @client: client instance
@@ -771,6 +796,10 @@ p9_virtio_create(struct p9_client *client, const char *devname, char *args)
struct virtio_chan *chan;
int ret = -ENOENT;
int found = 0;
+#if !defined(CONFIG_ARCH_NO_SG_CHAIN)
+ size_t npages;
+ size_t nsgl;
+#endif
if (devname == NULL)
return -EINVAL;
@@ -793,6 +822,34 @@ p9_virtio_create(struct p9_client *client, const char *devname, char *args)
return ret;
}
+ /*
+ * if user supplied an 'msize' option that's larger than what this
+ * transport supports by default, then try to allocate more sg lists
+ */
+ if (client->msize > client->trans_maxsize) {
+#ifdef CONFIG_ARCH_NO_SG_CHAIN
+ pr_info("limiting 'msize' to %d because architecture does not "
+ "support chained scatter gather lists\n",
+ client->trans_maxsize);
+#else
+ npages = DIV_ROUND_UP(client->msize, PAGE_SIZE);
+ if (npages > chan->p9_max_pages)
+ npages = chan->p9_max_pages;
+ nsgl = DIV_ROUND_UP(npages, SG_USER_PAGES_PER_LIST);
+ if (nsgl > chan->vq_sg->nsgl) {
+ /*
+ * if resize fails, no big deal, then just
+ * continue with default msize instead
+ */
+ if (!vq_sg_resize(&chan->vq_sg, nsgl)) {
+ client->trans_maxsize =
+ PAGE_SIZE *
+ ((nsgl * SG_USER_PAGES_PER_LIST) - 3);
+ }
+ }
+#endif /* CONFIG_ARCH_NO_SG_CHAIN */
+ }
+
client->trans = (void *)chan;
client->status = Connected;
chan->client = client;
--
2.20.1
^ permalink raw reply related [flat|nested] 11+ messages in thread* Re: [PATCH v2 6/7] 9p/trans_virtio: support larger msize values
2021-09-20 16:44 ` [PATCH v2 6/7] 9p/trans_virtio: support larger msize values Christian Schoenebeck
@ 2021-09-22 11:42 ` Christian Schoenebeck
0 siblings, 0 replies; 11+ messages in thread
From: Christian Schoenebeck @ 2021-09-22 11:42 UTC (permalink / raw)
To: v9fs-developer
Cc: netdev, Dominique Martinet, Eric Van Hensbergen, Latchesar Ionkov,
Greg Kurz, Vivek Goyal
On Montag, 20. September 2021 18:44:13 CEST Christian Schoenebeck wrote:
> The virtio transport supports by default a 9p 'msize' of up to
> approximately 500 kB. This patch adds support for larger 'msize'
> values by resizing the amount of scatter/gather lists if required.
>
> Signed-off-by: Christian Schoenebeck <linux_oss@crudebyte.com>
> ---
> net/9p/trans_virtio.c | 57 +++++++++++++++++++++++++++++++++++++++++++
> 1 file changed, 57 insertions(+)
>
> diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
> index 0db8de84bd51..4cb75f45aa15 100644
> --- a/net/9p/trans_virtio.c
> +++ b/net/9p/trans_virtio.c
> @@ -200,6 +200,31 @@ static struct virtqueue_sg *vq_sg_alloc(unsigned int
> nsgl) return vq_sg;
> }
>
> +/**
> + * vq_sg_resize - resize passed virtqueue scatter/gather lists to the
> passed + * amount of lists
> + * @_vq_sg: scatter/gather lists to be resized
> + * @nsgl: new amount of scatter/gather lists
> + */
> +static int vq_sg_resize(struct virtqueue_sg **_vq_sg, unsigned int nsgl)
> +{
> + struct virtqueue_sg *vq_sg;
> +
> + BUG_ON(!_vq_sg || !nsgl);
> + vq_sg = *_vq_sg;
> + if (vq_sg->nsgl == nsgl)
> + return 0;
> +
> + /* lazy resize implementation for now */
> + vq_sg = vq_sg_alloc(nsgl);
> + if (!vq_sg)
> + return -ENOMEM;
> +
> + kfree(*_vq_sg);
> + *_vq_sg = vq_sg;
> + return 0;
> +}
> +
> /**
> * p9_virtio_close - reclaim resources of a channel
> * @client: client instance
> @@ -771,6 +796,10 @@ p9_virtio_create(struct p9_client *client, const char
> *devname, char *args) struct virtio_chan *chan;
> int ret = -ENOENT;
> int found = 0;
> +#if !defined(CONFIG_ARCH_NO_SG_CHAIN)
> + size_t npages;
> + size_t nsgl;
> +#endif
>
> if (devname == NULL)
> return -EINVAL;
> @@ -793,6 +822,34 @@ p9_virtio_create(struct p9_client *client, const char
> *devname, char *args) return ret;
> }
>
> + /*
> + * if user supplied an 'msize' option that's larger than what this
> + * transport supports by default, then try to allocate more sg lists
> + */
> + if (client->msize > client->trans_maxsize) {
> +#ifdef CONFIG_ARCH_NO_SG_CHAIN
> + pr_info("limiting 'msize' to %d because architecture does not "
> + "support chained scatter gather lists\n",
> + client->trans_maxsize);
> +#else
> + npages = DIV_ROUND_UP(client->msize, PAGE_SIZE);
> + if (npages > chan->p9_max_pages)
> + npages = chan->p9_max_pages;
> + nsgl = DIV_ROUND_UP(npages, SG_USER_PAGES_PER_LIST);
> + if (nsgl > chan->vq_sg->nsgl) {
> + /*
> + * if resize fails, no big deal, then just
> + * continue with default msize instead
> + */
> + if (!vq_sg_resize(&chan->vq_sg, nsgl)) {
> + client->trans_maxsize =
> + PAGE_SIZE *
> + ((nsgl * SG_USER_PAGES_PER_LIST) - 3);
> + }
> + }
Maybe an ...
} else {
pr_info(...);
}
would be useful here to explain to the user why the transport
refrained from increasing its max size even though the user's
msize option demanded it.
> +#endif /* CONFIG_ARCH_NO_SG_CHAIN */
> + }
> +
> client->trans = (void *)chan;
> client->status = Connected;
> chan->client = client;
^ permalink raw reply [flat|nested] 11+ messages in thread
* [PATCH v2 7/7] 9p/trans_virtio: resize sg lists to whatever is possible
2021-09-20 16:43 [PATCH v2 0/7] net/9p: remove msize limit in virtio transport Christian Schoenebeck
` (5 preceding siblings ...)
2021-09-20 16:44 ` [PATCH v2 6/7] 9p/trans_virtio: support larger msize values Christian Schoenebeck
@ 2021-09-20 16:44 ` Christian Schoenebeck
2021-09-22 11:56 ` Christian Schoenebeck
6 siblings, 1 reply; 11+ messages in thread
From: Christian Schoenebeck @ 2021-09-20 16:44 UTC (permalink / raw)
To: v9fs-developer
Cc: netdev, Dominique Martinet, Eric Van Hensbergen, Latchesar Ionkov,
Greg Kurz, Vivek Goyal
Right now vq_sg_resize() uses a lazy implementation following
the all-or-nothing principle. So it either resizes exactly to
the requested new amount of sg lists, or it does not resize at
all.
The problem with this is that if a user supplies a very large
msize value, the resize would simply fail and the user would be
stuck with the default maximum msize supported by the virtio
transport.
To resolve this potential issue, change vq_sg_resize() to resize
the passed sg list to whatever is possible on the machine.
Signed-off-by: Christian Schoenebeck <linux_oss@crudebyte.com>
---
net/9p/trans_virtio.c | 65 ++++++++++++++++++++++++++++++++++++-------
1 file changed, 55 insertions(+), 10 deletions(-)
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index 4cb75f45aa15..b9bab7ed2768 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -205,24 +205,66 @@ static struct virtqueue_sg *vq_sg_alloc(unsigned int nsgl)
* amount of lists
* @_vq_sg: scatter/gather lists to be resized
* @nsgl: new amount of scatter/gather lists
+ *
+ * Old scatter/gather lists are retained. Only growing the size is supported.
+ * If the requested amount cannot be satisfied, then lists are increased to
+ * whatever is possible.
*/
static int vq_sg_resize(struct virtqueue_sg **_vq_sg, unsigned int nsgl)
{
struct virtqueue_sg *vq_sg;
+ unsigned int i;
+ size_t sz;
+ int ret = 0;
BUG_ON(!_vq_sg || !nsgl);
vq_sg = *_vq_sg;
+ if (nsgl > VIRTQUEUE_SG_NSGL_MAX)
+ nsgl = VIRTQUEUE_SG_NSGL_MAX;
if (vq_sg->nsgl == nsgl)
return 0;
+ if (vq_sg->nsgl > nsgl)
+ return -ENOTSUPP;
+
+ vq_sg = kzalloc(sizeof(struct virtqueue_sg) +
+ nsgl * sizeof(struct scatterlist *),
+ GFP_KERNEL);
- /* lazy resize implementation for now */
- vq_sg = vq_sg_alloc(nsgl);
if (!vq_sg)
return -ENOMEM;
- kfree(*_vq_sg);
+ /* copy over old scatter gather lists */
+ sz = sizeof(struct virtqueue_sg) +
+ (*_vq_sg)->nsgl * sizeof(struct scatterlist *);
+ memcpy(vq_sg, *_vq_sg, sz);
+
+ vq_sg->nsgl = nsgl;
+
+ for (i = (*_vq_sg)->nsgl; i < nsgl; ++i) {
+ vq_sg->sgl[i] = kmalloc_array(
+ SG_MAX_SINGLE_ALLOC, sizeof(struct scatterlist),
+ GFP_KERNEL
+ );
+ /*
+ * handle failed allocation as soft error, we take whatever
+ * we get
+ */
+ if (!vq_sg->sgl[i]) {
+ ret = -ENOMEM;
+ vq_sg->nsgl = nsgl = i;
+ break;
+ }
+ sg_init_table(vq_sg->sgl[i], SG_MAX_SINGLE_ALLOC);
+ if (i) {
+ /* chain the lists */
+ sg_chain(vq_sg->sgl[i - 1], SG_MAX_SINGLE_ALLOC,
+ vq_sg->sgl[i]);
+ }
+ }
+ sg_mark_end(&vq_sg->sgl[nsgl - 1][SG_MAX_SINGLE_ALLOC - 1]);
+
*_vq_sg = vq_sg;
- return 0;
+ return ret;
}
/**
@@ -839,13 +881,16 @@ p9_virtio_create(struct p9_client *client, const char *devname, char *args)
if (nsgl > chan->vq_sg->nsgl) {
/*
* if resize fails, no big deal, then just
- * continue with default msize instead
+ * continue with whatever we got
*/
- if (!vq_sg_resize(&chan->vq_sg, nsgl)) {
- client->trans_maxsize =
- PAGE_SIZE *
- ((nsgl * SG_USER_PAGES_PER_LIST) - 3);
- }
+ vq_sg_resize(&chan->vq_sg, nsgl);
+ /*
+ * actual allocation size might be less than
+ * requested, so use vq_sg->nsgl instead of nsgl
+ */
+ client->trans_maxsize =
+ PAGE_SIZE * ((chan->vq_sg->nsgl *
+ SG_USER_PAGES_PER_LIST) - 3);
}
#endif /* CONFIG_ARCH_NO_SG_CHAIN */
}
--
2.20.1
^ permalink raw reply related [flat|nested] 11+ messages in thread* Re: [PATCH v2 7/7] 9p/trans_virtio: resize sg lists to whatever is possible
2021-09-20 16:44 ` [PATCH v2 7/7] 9p/trans_virtio: resize sg lists to whatever is possible Christian Schoenebeck
@ 2021-09-22 11:56 ` Christian Schoenebeck
0 siblings, 0 replies; 11+ messages in thread
From: Christian Schoenebeck @ 2021-09-22 11:56 UTC (permalink / raw)
To: v9fs-developer
Cc: netdev, Dominique Martinet, Eric Van Hensbergen, Latchesar Ionkov,
Greg Kurz, Vivek Goyal
On Montag, 20. September 2021 18:44:20 CEST Christian Schoenebeck wrote:
> Right now vq_sg_resize() used a lazy implementation following
> the all-or-nothing princible. So it either resized exactly to
> the requested new amount of sg lists, or it did not resize at
> all.
>
> The problem with this is if a user supplies a very large msize
> value, resize would simply fail and the user would stick to
> the default maximum msize supported by the virtio transport.
>
> To resolve this potential issue, change vq_sg_resize() to resize
> the passed sg list to whatever is possible on the machine.
>
> Signed-off-by: Christian Schoenebeck <linux_oss@crudebyte.com>
> ---
> net/9p/trans_virtio.c | 65 ++++++++++++++++++++++++++++++++++++-------
> 1 file changed, 55 insertions(+), 10 deletions(-)
>
> diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
> index 4cb75f45aa15..b9bab7ed2768 100644
> --- a/net/9p/trans_virtio.c
> +++ b/net/9p/trans_virtio.c
> @@ -205,24 +205,66 @@ static struct virtqueue_sg *vq_sg_alloc(unsigned int
> nsgl) * amount of lists
> * @_vq_sg: scatter/gather lists to be resized
> * @nsgl: new amount of scatter/gather lists
> + *
> + * Old scatter/gather lists are retained. Only growing the size is
> supported. + * If the requested amount cannot be satisfied, then lists are
> increased to + * whatever is possible.
> */
> static int vq_sg_resize(struct virtqueue_sg **_vq_sg, unsigned int nsgl)
> {
> struct virtqueue_sg *vq_sg;
> + unsigned int i;
> + size_t sz;
> + int ret = 0;
>
> BUG_ON(!_vq_sg || !nsgl);
> vq_sg = *_vq_sg;
> + if (nsgl > VIRTQUEUE_SG_NSGL_MAX)
> + nsgl = VIRTQUEUE_SG_NSGL_MAX;
> if (vq_sg->nsgl == nsgl)
> return 0;
> + if (vq_sg->nsgl > nsgl)
> + return -ENOTSUPP;
> +
> + vq_sg = kzalloc(sizeof(struct virtqueue_sg) +
> + nsgl * sizeof(struct scatterlist *),
> + GFP_KERNEL);
>
> - /* lazy resize implementation for now */
> - vq_sg = vq_sg_alloc(nsgl);
> if (!vq_sg)
> return -ENOMEM;
>
> - kfree(*_vq_sg);
> + /* copy over old scatter gather lists */
> + sz = sizeof(struct virtqueue_sg) +
> + (*_vq_sg)->nsgl * sizeof(struct scatterlist *);
> + memcpy(vq_sg, *_vq_sg, sz);
Missing
kfree(*_vq_sg);
here.
> +
> + vq_sg->nsgl = nsgl;
> +
> + for (i = (*_vq_sg)->nsgl; i < nsgl; ++i) {
> + vq_sg->sgl[i] = kmalloc_array(
> + SG_MAX_SINGLE_ALLOC, sizeof(struct scatterlist),
> + GFP_KERNEL
> + );
> + /*
> + * handle failed allocation as soft error, we take whatever
> + * we get
> + */
> + if (!vq_sg->sgl[i]) {
> + ret = -ENOMEM;
> + vq_sg->nsgl = nsgl = i;
> + break;
> + }
> + sg_init_table(vq_sg->sgl[i], SG_MAX_SINGLE_ALLOC);
> + if (i) {
> + /* chain the lists */
> + sg_chain(vq_sg->sgl[i - 1], SG_MAX_SINGLE_ALLOC,
> + vq_sg->sgl[i]);
> + }
> + }
> + sg_mark_end(&vq_sg->sgl[nsgl - 1][SG_MAX_SINGLE_ALLOC - 1]);
> +
> *_vq_sg = vq_sg;
> - return 0;
> + return ret;
> }
>
> /**
> @@ -839,13 +881,16 @@ p9_virtio_create(struct p9_client *client, const char
> *devname, char *args) if (nsgl > chan->vq_sg->nsgl) {
> /*
> * if resize fails, no big deal, then just
> - * continue with default msize instead
> + * continue with whatever we got
> */
> - if (!vq_sg_resize(&chan->vq_sg, nsgl)) {
> - client->trans_maxsize =
> - PAGE_SIZE *
> - ((nsgl * SG_USER_PAGES_PER_LIST) - 3);
> - }
> + vq_sg_resize(&chan->vq_sg, nsgl);
> + /*
> + * actual allocation size might be less than
> + * requested, so use vq_sg->nsgl instead of nsgl
> + */
> + client->trans_maxsize =
> + PAGE_SIZE * ((chan->vq_sg->nsgl *
> + SG_USER_PAGES_PER_LIST) - 3);
As with patch 6, there should probably be an additional
if (chan->vq_sg->nsgl < nsgl) {
pr_info(...);
}
here to explain to the user that not all of the sg lists required
to satisfy the user's requested msize option could be allocated.
> }
> #endif /* CONFIG_ARCH_NO_SG_CHAIN */
> }
^ permalink raw reply [flat|nested] 11+ messages in thread