From: David Howells <dhowells@redhat.com>
To: Matthew Wilcox <willy@infradead.org>,
"David S. Miller" <davem@davemloft.net>,
Eric Dumazet <edumazet@google.com>,
Jakub Kicinski <kuba@kernel.org>, Paolo Abeni <pabeni@redhat.com>
Cc: David Howells <dhowells@redhat.com>,
Al Viro <viro@zeniv.linux.org.uk>,
Christoph Hellwig <hch@infradead.org>,
Jens Axboe <axboe@kernel.dk>, Jeff Layton <jlayton@kernel.org>,
Christian Brauner <brauner@kernel.org>,
Chuck Lever III <chuck.lever@oracle.com>,
Linus Torvalds <torvalds@linux-foundation.org>,
netdev@vger.kernel.org, linux-fsdevel@vger.kernel.org,
linux-kernel@vger.kernel.org, linux-mm@kvack.org,
Herbert Xu <herbert@gondor.apana.org.au>,
linux-crypto@vger.kernel.org
Subject: [PATCH v3 22/55] crypto: af_alg: Use netfs_extract_iter_to_sg() to create scatterlists
Date: Fri, 31 Mar 2023 17:08:41 +0100
Message-ID: <20230331160914.1608208-23-dhowells@redhat.com>
In-Reply-To: <20230331160914.1608208-1-dhowells@redhat.com>
Use netfs_extract_iter_to_sg() to decant the destination iterator into a
scatterlist in af_alg_get_rsgl(). af_alg_make_sg() can then be removed.

[!] Note that, if this approach is acceptable, netfs_extract_iter_to_sg()
should be moved into core code.
Signed-off-by: David Howells <dhowells@redhat.com>
cc: Herbert Xu <herbert@gondor.apana.org.au>
cc: "David S. Miller" <davem@davemloft.net>
cc: Eric Dumazet <edumazet@google.com>
cc: Jakub Kicinski <kuba@kernel.org>
cc: Paolo Abeni <pabeni@redhat.com>
cc: Jens Axboe <axboe@kernel.dk>
cc: Matthew Wilcox <willy@infradead.org>
cc: linux-crypto@vger.kernel.org
cc: netdev@vger.kernel.org
---
crypto/af_alg.c | 55 ++++++++++-------------------------------
crypto/algif_aead.c | 12 ++++-----
crypto/algif_hash.c | 18 ++++++++++----
crypto/algif_skcipher.c | 2 +-
include/crypto/if_alg.h | 6 ++---
5 files changed, 35 insertions(+), 58 deletions(-)
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 7caff10df643..1dafd088ad45 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -22,6 +22,7 @@
#include <linux/sched/signal.h>
#include <linux/security.h>
#include <linux/string.h>
+#include <linux/netfs.h>
#include <keys/user-type.h>
#include <keys/trusted-type.h>
#include <keys/encrypted-type.h>
@@ -531,45 +532,11 @@ static const struct net_proto_family alg_family = {
.owner = THIS_MODULE,
};
-int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len)
-{
- struct page **pages = sgl->pages;
- size_t off;
- ssize_t n;
- int npages, i;
-
- n = iov_iter_extract_pages(iter, &pages, len, ALG_MAX_PAGES, 0, &off);
- if (n < 0)
- return n;
-
- sgl->need_unpin = iov_iter_extract_will_pin(iter);
-
- npages = DIV_ROUND_UP(off + n, PAGE_SIZE);
- if (WARN_ON(npages == 0))
- return -EINVAL;
- /* Add one extra for linking */
- sg_init_table(sgl->sg, npages + 1);
-
- for (i = 0, len = n; i < npages; i++) {
- int plen = min_t(int, len, PAGE_SIZE - off);
-
- sg_set_page(sgl->sg + i, sgl->pages[i], plen, off);
-
- off = 0;
- len -= plen;
- }
- sg_mark_end(sgl->sg + npages - 1);
- sgl->npages = npages;
-
- return n;
-}
-EXPORT_SYMBOL_GPL(af_alg_make_sg);
-
static void af_alg_link_sg(struct af_alg_sgl *sgl_prev,
struct af_alg_sgl *sgl_new)
{
- sg_unmark_end(sgl_prev->sg + sgl_prev->npages - 1);
- sg_chain(sgl_prev->sg, sgl_prev->npages + 1, sgl_new->sg);
+ sg_unmark_end(sgl_prev->sgt.sgl + sgl_prev->sgt.nents - 1);
+ sg_chain(sgl_prev->sgt.sgl, sgl_prev->sgt.nents + 1, sgl_new->sgt.sgl);
}
void af_alg_free_sg(struct af_alg_sgl *sgl)
@@ -577,8 +544,8 @@ void af_alg_free_sg(struct af_alg_sgl *sgl)
int i;
if (sgl->need_unpin)
- for (i = 0; i < sgl->npages; i++)
- unpin_user_page(sgl->pages[i]);
+ for (i = 0; i < sgl->sgt.nents; i++)
+ unpin_user_page(sg_page(&sgl->sgt.sgl[i]));
}
EXPORT_SYMBOL_GPL(af_alg_free_sg);
@@ -1292,8 +1259,8 @@ int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags,
while (maxsize > len && msg_data_left(msg)) {
struct af_alg_rsgl *rsgl;
+ ssize_t err;
size_t seglen;
- int err;
/* limit the amount of readable buffers */
if (!af_alg_readable(sk))
@@ -1310,16 +1277,20 @@ int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags,
return -ENOMEM;
}
- rsgl->sgl.npages = 0;
+ rsgl->sgl.sgt.sgl = rsgl->sgl.sgl;
+ rsgl->sgl.sgt.nents = 0;
+ rsgl->sgl.sgt.orig_nents = 0;
list_add_tail(&rsgl->list, &areq->rsgl_list);
- /* make one iovec available as scatterlist */
- err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen);
+ err = netfs_extract_iter_to_sg(&msg->msg_iter, seglen,
+ &rsgl->sgl.sgt, ALG_MAX_PAGES, 0);
if (err < 0) {
rsgl->sg_num_bytes = 0;
return err;
}
+ rsgl->sgl.need_unpin = iov_iter_extract_will_pin(&msg->msg_iter);
+
/* chain the new scatterlist with previous one */
if (areq->last_rsgl)
af_alg_link_sg(&areq->last_rsgl->sgl, &rsgl->sgl);
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index 42493b4d8ce4..f6aa3856d8d5 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -210,7 +210,7 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
*/
/* Use the RX SGL as source (and destination) for crypto op. */
- rsgl_src = areq->first_rsgl.sgl.sg;
+ rsgl_src = areq->first_rsgl.sgl.sgt.sgl;
if (ctx->enc) {
/*
@@ -224,7 +224,7 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
* RX SGL: AAD || PT || Tag
*/
err = crypto_aead_copy_sgl(null_tfm, tsgl_src,
- areq->first_rsgl.sgl.sg, processed);
+ areq->first_rsgl.sgl.sgt.sgl, processed);
if (err)
goto free;
af_alg_pull_tsgl(sk, processed, NULL, 0);
@@ -242,7 +242,7 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
/* Copy AAD || CT to RX SGL buffer for in-place operation. */
err = crypto_aead_copy_sgl(null_tfm, tsgl_src,
- areq->first_rsgl.sgl.sg, outlen);
+ areq->first_rsgl.sgl.sgt.sgl, outlen);
if (err)
goto free;
@@ -268,8 +268,8 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
/* RX SGL present */
struct af_alg_sgl *sgl_prev = &areq->last_rsgl->sgl;
- sg_unmark_end(sgl_prev->sg + sgl_prev->npages - 1);
- sg_chain(sgl_prev->sg, sgl_prev->npages + 1,
+ sg_unmark_end(sgl_prev->sgt.sgl + sgl_prev->sgt.nents - 1);
+ sg_chain(sgl_prev->sgt.sgl, sgl_prev->sgt.nents + 1,
areq->tsgl);
} else
/* no RX SGL present (e.g. authentication only) */
@@ -278,7 +278,7 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
/* Initialize the crypto operation */
aead_request_set_crypt(&areq->cra_u.aead_req, rsgl_src,
- areq->first_rsgl.sgl.sg, used, ctx->iv);
+ areq->first_rsgl.sgl.sgt.sgl, used, ctx->iv);
aead_request_set_ad(&areq->cra_u.aead_req, ctx->aead_assoclen);
aead_request_set_tfm(&areq->cra_u.aead_req, tfm);
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index 1d017ec5c63c..f051fa624bd7 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -14,6 +14,7 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
+#include <linux/netfs.h>
#include <net/sock.h>
struct hash_ctx {
@@ -91,13 +92,20 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
if (len > limit)
len = limit;
- len = af_alg_make_sg(&ctx->sgl, &msg->msg_iter, len);
+ ctx->sgl.sgt.sgl = ctx->sgl.sgl;
+ ctx->sgl.sgt.nents = 0;
+ ctx->sgl.sgt.orig_nents = 0;
+
+ len = netfs_extract_iter_to_sg(&msg->msg_iter, len,
+ &ctx->sgl.sgt, ALG_MAX_PAGES, 0);
if (len < 0) {
err = copied ? 0 : len;
goto unlock;
}
- ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, NULL, len);
+ ctx->sgl.need_unpin = iov_iter_extract_will_pin(&msg->msg_iter);
+
+ ahash_request_set_crypt(&ctx->req, ctx->sgl.sgt.sgl, NULL, len);
err = crypto_wait_req(crypto_ahash_update(&ctx->req),
&ctx->wait);
@@ -141,8 +149,8 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page,
flags |= MSG_MORE;
lock_sock(sk);
- sg_init_table(ctx->sgl.sg, 1);
- sg_set_page(ctx->sgl.sg, page, size, offset);
+ sg_init_table(ctx->sgl.sgl, 1);
+ sg_set_page(ctx->sgl.sgl, page, size, offset);
if (!(flags & MSG_MORE)) {
err = hash_alloc_result(sk, ctx);
@@ -151,7 +159,7 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page,
} else if (!ctx->more)
hash_free_result(sk, ctx);
- ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, ctx->result, size);
+ ahash_request_set_crypt(&ctx->req, ctx->sgl.sgl, ctx->result, size);
if (!(flags & MSG_MORE)) {
if (ctx->more)
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index ee8890ee8f33..a251cd6bd5b9 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -105,7 +105,7 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
/* Initialize the crypto operation */
skcipher_request_set_tfm(&areq->cra_u.skcipher_req, tfm);
skcipher_request_set_crypt(&areq->cra_u.skcipher_req, areq->tsgl,
- areq->first_rsgl.sgl.sg, len, ctx->iv);
+ areq->first_rsgl.sgl.sgt.sgl, len, ctx->iv);
if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) {
/* AIO operation */
diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
index 46494b33f5bc..34224e77f5a2 100644
--- a/include/crypto/if_alg.h
+++ b/include/crypto/if_alg.h
@@ -56,9 +56,8 @@ struct af_alg_type {
};
struct af_alg_sgl {
- struct scatterlist sg[ALG_MAX_PAGES + 1];
- struct page *pages[ALG_MAX_PAGES];
- unsigned int npages;
+ struct sg_table sgt;
+ struct scatterlist sgl[ALG_MAX_PAGES + 1];
bool need_unpin;
};
@@ -164,7 +163,6 @@ int af_alg_release(struct socket *sock);
void af_alg_release_parent(struct sock *sk);
int af_alg_accept(struct sock *sk, struct socket *newsock, bool kern);
-int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len);
void af_alg_free_sg(struct af_alg_sgl *sgl);
static inline struct alg_sock *alg_sk(struct sock *sk)