From: David Howells <dhowells@redhat.com>
To: Matthew Wilcox <willy@infradead.org>,
"David S. Miller" <davem@davemloft.net>,
Eric Dumazet <edumazet@google.com>,
Jakub Kicinski <kuba@kernel.org>, Paolo Abeni <pabeni@redhat.com>
Cc: David Howells <dhowells@redhat.com>,
Al Viro <viro@zeniv.linux.org.uk>,
Christoph Hellwig <hch@infradead.org>,
Jens Axboe <axboe@kernel.dk>, Jeff Layton <jlayton@kernel.org>,
Christian Brauner <brauner@kernel.org>,
Chuck Lever III <chuck.lever@oracle.com>,
Linus Torvalds <torvalds@linux-foundation.org>,
netdev@vger.kernel.org, linux-fsdevel@vger.kernel.org,
linux-kernel@vger.kernel.org, linux-mm@kvack.org,
Herbert Xu <herbert@gondor.apana.org.au>,
linux-crypto@vger.kernel.org
Subject: [RFC PATCH v2 22/48] crypto: af_alg: Use netfs_extract_iter_to_sg() to create scatterlists
Date: Wed, 29 Mar 2023 15:13:28 +0100
Message-ID: <20230329141354.516864-23-dhowells@redhat.com>
In-Reply-To: <20230329141354.516864-1-dhowells@redhat.com>
Use netfs_extract_iter_to_sg() to decant the destination iterator into a
scatterlist in af_alg_get_rsgl().  af_alg_make_sg() can then be removed.

[!] Note that if this approach is acceptable, netfs_extract_iter_to_sg()
    should be moved into core code.
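
For illustration only (not part of the patch): a minimal sketch of the call
pattern this change switches to, condensed from the af_alg_get_rsgl() and
hash_sendmsg() hunks below.  The wrapper name is made up for the example;
the callers in the patch open-code these steps inline.

	/* Hypothetical helper, for illustration only. */
	static ssize_t alg_extract_to_sg(struct af_alg_sgl *sgl,
					 struct msghdr *msg, size_t maxsize)
	{
		ssize_t ret;

		/* Point the sg_table at the embedded scatterlist array. */
		sgl->sgt.sgl        = sgl->sgl;
		sgl->sgt.nents      = 0;
		sgl->sgt.orig_nents = 0;

		/* Extract up to maxsize bytes of the iterator into the
		 * sg_table, taking refs or pins on the pages as appropriate
		 * for the iterator type.
		 */
		ret = netfs_extract_iter_to_sg(&msg->msg_iter, maxsize,
					       &sgl->sgt, ALG_MAX_PAGES, 0);
		if (ret < 0)
			return ret;

		/* Record whether af_alg_free_sg() must unpin the pages. */
		sgl->need_unpin = iov_iter_extract_will_pin(&msg->msg_iter);
		return ret;
	}

On the free side, af_alg_free_sg() then walks sgl->sgt.nents entries and,
if need_unpin is set, unpins sg_page() of each one.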
Signed-off-by: David Howells <dhowells@redhat.com>
cc: Herbert Xu <herbert@gondor.apana.org.au>
cc: linux-crypto@vger.kernel.org
---
crypto/af_alg.c | 55 ++++++++++-------------------------------
crypto/algif_aead.c | 12 ++++-----
crypto/algif_hash.c | 18 ++++++++++----
crypto/algif_skcipher.c | 2 +-
include/crypto/if_alg.h | 6 ++---
5 files changed, 35 insertions(+), 58 deletions(-)
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 7caff10df643..1dafd088ad45 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -22,6 +22,7 @@
#include <linux/sched/signal.h>
#include <linux/security.h>
#include <linux/string.h>
+#include <linux/netfs.h>
#include <keys/user-type.h>
#include <keys/trusted-type.h>
#include <keys/encrypted-type.h>
@@ -531,45 +532,11 @@ static const struct net_proto_family alg_family = {
.owner = THIS_MODULE,
};
-int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len)
-{
- struct page **pages = sgl->pages;
- size_t off;
- ssize_t n;
- int npages, i;
-
- n = iov_iter_extract_pages(iter, &pages, len, ALG_MAX_PAGES, 0, &off);
- if (n < 0)
- return n;
-
- sgl->need_unpin = iov_iter_extract_will_pin(iter);
-
- npages = DIV_ROUND_UP(off + n, PAGE_SIZE);
- if (WARN_ON(npages == 0))
- return -EINVAL;
- /* Add one extra for linking */
- sg_init_table(sgl->sg, npages + 1);
-
- for (i = 0, len = n; i < npages; i++) {
- int plen = min_t(int, len, PAGE_SIZE - off);
-
- sg_set_page(sgl->sg + i, sgl->pages[i], plen, off);
-
- off = 0;
- len -= plen;
- }
- sg_mark_end(sgl->sg + npages - 1);
- sgl->npages = npages;
-
- return n;
-}
-EXPORT_SYMBOL_GPL(af_alg_make_sg);
-
static void af_alg_link_sg(struct af_alg_sgl *sgl_prev,
struct af_alg_sgl *sgl_new)
{
- sg_unmark_end(sgl_prev->sg + sgl_prev->npages - 1);
- sg_chain(sgl_prev->sg, sgl_prev->npages + 1, sgl_new->sg);
+ sg_unmark_end(sgl_prev->sgt.sgl + sgl_prev->sgt.nents - 1);
+ sg_chain(sgl_prev->sgt.sgl, sgl_prev->sgt.nents + 1, sgl_new->sgt.sgl);
}
void af_alg_free_sg(struct af_alg_sgl *sgl)
@@ -577,8 +544,8 @@ void af_alg_free_sg(struct af_alg_sgl *sgl)
int i;
if (sgl->need_unpin)
- for (i = 0; i < sgl->npages; i++)
- unpin_user_page(sgl->pages[i]);
+ for (i = 0; i < sgl->sgt.nents; i++)
+ unpin_user_page(sg_page(&sgl->sgt.sgl[i]));
}
EXPORT_SYMBOL_GPL(af_alg_free_sg);
@@ -1292,8 +1259,8 @@ int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags,
while (maxsize > len && msg_data_left(msg)) {
struct af_alg_rsgl *rsgl;
+ ssize_t err;
size_t seglen;
- int err;
/* limit the amount of readable buffers */
if (!af_alg_readable(sk))
@@ -1310,16 +1277,20 @@ int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags,
return -ENOMEM;
}
- rsgl->sgl.npages = 0;
+ rsgl->sgl.sgt.sgl = rsgl->sgl.sgl;
+ rsgl->sgl.sgt.nents = 0;
+ rsgl->sgl.sgt.orig_nents = 0;
list_add_tail(&rsgl->list, &areq->rsgl_list);
- /* make one iovec available as scatterlist */
- err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen);
+ err = netfs_extract_iter_to_sg(&msg->msg_iter, seglen,
+ &rsgl->sgl.sgt, ALG_MAX_PAGES, 0);
if (err < 0) {
rsgl->sg_num_bytes = 0;
return err;
}
+ rsgl->sgl.need_unpin = iov_iter_extract_will_pin(&msg->msg_iter);
+
/* chain the new scatterlist with previous one */
if (areq->last_rsgl)
af_alg_link_sg(&areq->last_rsgl->sgl, &rsgl->sgl);
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index 42493b4d8ce4..f6aa3856d8d5 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -210,7 +210,7 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
*/
/* Use the RX SGL as source (and destination) for crypto op. */
- rsgl_src = areq->first_rsgl.sgl.sg;
+ rsgl_src = areq->first_rsgl.sgl.sgt.sgl;
if (ctx->enc) {
/*
@@ -224,7 +224,7 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
* RX SGL: AAD || PT || Tag
*/
err = crypto_aead_copy_sgl(null_tfm, tsgl_src,
- areq->first_rsgl.sgl.sg, processed);
+ areq->first_rsgl.sgl.sgt.sgl, processed);
if (err)
goto free;
af_alg_pull_tsgl(sk, processed, NULL, 0);
@@ -242,7 +242,7 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
/* Copy AAD || CT to RX SGL buffer for in-place operation. */
err = crypto_aead_copy_sgl(null_tfm, tsgl_src,
- areq->first_rsgl.sgl.sg, outlen);
+ areq->first_rsgl.sgl.sgt.sgl, outlen);
if (err)
goto free;
@@ -268,8 +268,8 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
/* RX SGL present */
struct af_alg_sgl *sgl_prev = &areq->last_rsgl->sgl;
- sg_unmark_end(sgl_prev->sg + sgl_prev->npages - 1);
- sg_chain(sgl_prev->sg, sgl_prev->npages + 1,
+ sg_unmark_end(sgl_prev->sgt.sgl + sgl_prev->sgt.nents - 1);
+ sg_chain(sgl_prev->sgt.sgl, sgl_prev->sgt.nents + 1,
areq->tsgl);
} else
/* no RX SGL present (e.g. authentication only) */
@@ -278,7 +278,7 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
/* Initialize the crypto operation */
aead_request_set_crypt(&areq->cra_u.aead_req, rsgl_src,
- areq->first_rsgl.sgl.sg, used, ctx->iv);
+ areq->first_rsgl.sgl.sgt.sgl, used, ctx->iv);
aead_request_set_ad(&areq->cra_u.aead_req, ctx->aead_assoclen);
aead_request_set_tfm(&areq->cra_u.aead_req, tfm);
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index 1d017ec5c63c..f051fa624bd7 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -14,6 +14,7 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
+#include <linux/netfs.h>
#include <net/sock.h>
struct hash_ctx {
@@ -91,13 +92,20 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
if (len > limit)
len = limit;
- len = af_alg_make_sg(&ctx->sgl, &msg->msg_iter, len);
+ ctx->sgl.sgt.sgl = ctx->sgl.sgl;
+ ctx->sgl.sgt.nents = 0;
+ ctx->sgl.sgt.orig_nents = 0;
+
+ len = netfs_extract_iter_to_sg(&msg->msg_iter, len,
+ &ctx->sgl.sgt, ALG_MAX_PAGES, 0);
if (len < 0) {
err = copied ? 0 : len;
goto unlock;
}
- ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, NULL, len);
+ ctx->sgl.need_unpin = iov_iter_extract_will_pin(&msg->msg_iter);
+
+ ahash_request_set_crypt(&ctx->req, ctx->sgl.sgt.sgl, NULL, len);
err = crypto_wait_req(crypto_ahash_update(&ctx->req),
&ctx->wait);
@@ -141,8 +149,8 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page,
flags |= MSG_MORE;
lock_sock(sk);
- sg_init_table(ctx->sgl.sg, 1);
- sg_set_page(ctx->sgl.sg, page, size, offset);
+ sg_init_table(ctx->sgl.sgl, 1);
+ sg_set_page(ctx->sgl.sgl, page, size, offset);
if (!(flags & MSG_MORE)) {
err = hash_alloc_result(sk, ctx);
@@ -151,7 +159,7 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page,
} else if (!ctx->more)
hash_free_result(sk, ctx);
- ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, ctx->result, size);
+ ahash_request_set_crypt(&ctx->req, ctx->sgl.sgl, ctx->result, size);
if (!(flags & MSG_MORE)) {
if (ctx->more)
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index ee8890ee8f33..a251cd6bd5b9 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -105,7 +105,7 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
/* Initialize the crypto operation */
skcipher_request_set_tfm(&areq->cra_u.skcipher_req, tfm);
skcipher_request_set_crypt(&areq->cra_u.skcipher_req, areq->tsgl,
- areq->first_rsgl.sgl.sg, len, ctx->iv);
+ areq->first_rsgl.sgl.sgt.sgl, len, ctx->iv);
if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) {
/* AIO operation */
diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
index 46494b33f5bc..34224e77f5a2 100644
--- a/include/crypto/if_alg.h
+++ b/include/crypto/if_alg.h
@@ -56,9 +56,8 @@ struct af_alg_type {
};
struct af_alg_sgl {
- struct scatterlist sg[ALG_MAX_PAGES + 1];
- struct page *pages[ALG_MAX_PAGES];
- unsigned int npages;
+ struct sg_table sgt;
+ struct scatterlist sgl[ALG_MAX_PAGES + 1];
bool need_unpin;
};
@@ -164,7 +163,6 @@ int af_alg_release(struct socket *sock);
void af_alg_release_parent(struct sock *sk);
int af_alg_accept(struct sock *sk, struct socket *newsock, bool kern);
-int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len);
void af_alg_free_sg(struct af_alg_sgl *sgl);
static inline struct alg_sock *alg_sk(struct sock *sk)