From: Christoph Hellwig <hch@lst.de>
To: viro@zeniv.linux.org.uk
Cc: Avi Kivity <avi@scylladb.com>,
linux-aio@kvack.org, linux-fsdevel@vger.kernel.org,
netdev@vger.kernel.org, linux-api@vger.kernel.org,
linux-kernel@vger.kernel.org
Subject: [PATCH 07/30] aio: add delayed cancel support
Date: Wed, 28 Mar 2018 09:29:03 +0200
Message-ID: <20180328072926.17131-8-hch@lst.de>
In-Reply-To: <20180328072926.17131-1-hch@lst.de>

The upcoming aio poll support would like to be able to complete the
iocb inline from the cancellation context, but that would cause a lock
order reversal.  Add support for optionally moving the cancellation
outside the context lock to avoid this reversal.
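
To illustrate the intended two-phase pattern (this is a condensed
sketch of the free_ioctx_users() hunk further down, not code in
addition to the diff):

    struct aio_kiocb *req;
    LIST_HEAD(list);

    spin_lock_irq(&ctx->ctx_lock);
    while (!list_empty(&ctx->active_reqs)) {
            req = list_first_entry(&ctx->active_reqs,
                                   struct aio_kiocb, ki_list);
            if (req->flags & AIO_IOCB_DELAYED_CANCEL) {
                    /* phase 1: only mark and park under the lock */
                    req->flags |= AIO_IOCB_CANCELLED;
                    list_move_tail(&req->ki_list, &list);
            } else {
                    kiocb_cancel(req);
            }
    }
    spin_unlock_irq(&ctx->ctx_lock);

    /* phase 2: cancel with ctx_lock dropped, so ->ki_cancel may
     * complete the iocb inline without a lock order reversal */
    while (!list_empty(&list)) {
            req = list_first_entry(&list, struct aio_kiocb, ki_list);
            kiocb_cancel(req);
    }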

To make this safe, aio_complete needs to check whether this particular
call should complete the iocb.  If it did not, the caller must not
release any other resources.
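
Callers thus switch from unconditionally dropping their references to
only doing so when aio_complete() reports that it actually completed
the iocb; a minimal sketch of the convention, matching the
aio_complete_rw() and aio_fsync_work() hunks below:

    struct file *file = kiocb->ki_filp;

    /*
     * Only release the file if this call completed the iocb.  If
     * aio_complete() returns false, a delayed cancellation owns the
     * iocb and will complete it (and drop the reference) instead.
     */
    if (aio_complete(iocb, res, res2, 0))
            fput(file);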
Signed-off-by: Christoph Hellwig <hch@lst.de>
---
fs/aio.c | 81 ++++++++++++++++++++++++++++++++++++++++++++++------------------
1 file changed, 59 insertions(+), 22 deletions(-)
diff --git a/fs/aio.c b/fs/aio.c
index c36eec8b0879..232dd84fc897 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -177,6 +177,11 @@ struct aio_kiocb {
struct list_head ki_list; /* the aio core uses this
* for cancellation */
+ unsigned int flags; /* protected by ctx->ctx_lock */
+#define AIO_IOCB_CAN_CANCEL (1 << 0)
+#define AIO_IOCB_DELAYED_CANCEL (1 << 1)
+#define AIO_IOCB_CANCELLED (1 << 2)
+
/*
* If the aio_resfd field of the userspace iocb is not zero,
* this is the underlying eventfd context to deliver events to.
@@ -543,9 +548,9 @@ static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
#define AIO_EVENTS_FIRST_PAGE ((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
#define AIO_EVENTS_OFFSET (AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE)
-void kiocb_set_cancel_fn(struct kiocb *iocb, kiocb_cancel_fn *cancel)
+static void __kiocb_set_cancel_fn(struct aio_kiocb *req,
+ kiocb_cancel_fn *cancel, unsigned int iocb_flags)
{
- struct aio_kiocb *req = container_of(iocb, struct aio_kiocb, rw);
struct kioctx *ctx = req->ki_ctx;
unsigned long flags;
@@ -555,8 +560,15 @@ void kiocb_set_cancel_fn(struct kiocb *iocb, kiocb_cancel_fn *cancel)
spin_lock_irqsave(&ctx->ctx_lock, flags);
list_add_tail(&req->ki_list, &ctx->active_reqs);
req->ki_cancel = cancel;
+ req->flags |= (AIO_IOCB_CAN_CANCEL | iocb_flags);
spin_unlock_irqrestore(&ctx->ctx_lock, flags);
}
+
+void kiocb_set_cancel_fn(struct kiocb *iocb, kiocb_cancel_fn *cancel)
+{
+ return __kiocb_set_cancel_fn(container_of(iocb, struct aio_kiocb, rw),
+ cancel, 0);
+}
EXPORT_SYMBOL(kiocb_set_cancel_fn);
static int kiocb_cancel(struct aio_kiocb *kiocb)
@@ -599,17 +611,26 @@ static void free_ioctx_users(struct percpu_ref *ref)
{
struct kioctx *ctx = container_of(ref, struct kioctx, users);
struct aio_kiocb *req;
+ LIST_HEAD(list);
spin_lock_irq(&ctx->ctx_lock);
-
while (!list_empty(&ctx->active_reqs)) {
req = list_first_entry(&ctx->active_reqs,
struct aio_kiocb, ki_list);
- kiocb_cancel(req);
+ if (req->flags & AIO_IOCB_DELAYED_CANCEL) {
+ req->flags |= AIO_IOCB_CANCELLED;
+ list_move_tail(&req->ki_list, &list);
+ } else {
+ kiocb_cancel(req);
+ }
}
-
spin_unlock_irq(&ctx->ctx_lock);
+ while (!list_empty(&list)) {
+ req = list_first_entry(&list, struct aio_kiocb, ki_list);
+ kiocb_cancel(req);
+ }
+
percpu_ref_kill(&ctx->reqs);
percpu_ref_put(&ctx->reqs);
}
@@ -1045,22 +1066,30 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id)
return ret;
}
+#define AIO_COMPLETE_CANCEL (1 << 0)
+
/* aio_complete
* Called when the io request on the given iocb is complete.
*/
-static void aio_complete(struct aio_kiocb *iocb, long res, long res2)
+static bool aio_complete(struct aio_kiocb *iocb, long res, long res2,
+ unsigned complete_flags)
{
struct kioctx *ctx = iocb->ki_ctx;
struct aio_ring *ring;
struct io_event *ev_page, *event;
unsigned tail, pos, head;
- unsigned long flags;
-
- if (!list_empty_careful(&iocb->ki_list)) {
- unsigned long flags;
+ unsigned long flags;
+ if (iocb->flags & AIO_IOCB_CAN_CANCEL) {
spin_lock_irqsave(&ctx->ctx_lock, flags);
- list_del(&iocb->ki_list);
+ if (!(complete_flags & AIO_COMPLETE_CANCEL) &&
+ (iocb->flags & AIO_IOCB_CANCELLED)) {
+ spin_unlock_irqrestore(&ctx->ctx_lock, flags);
+ return false;
+ }
+
+ if (!list_empty(&iocb->ki_list))
+ list_del(&iocb->ki_list);
spin_unlock_irqrestore(&ctx->ctx_lock, flags);
}
@@ -1136,6 +1165,7 @@ static void aio_complete(struct aio_kiocb *iocb, long res, long res2)
wake_up(&ctx->wait);
percpu_ref_put(&ctx->reqs);
+ return true;
}
/* aio_read_events_ring
@@ -1384,6 +1414,7 @@ SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
static void aio_complete_rw(struct kiocb *kiocb, long res, long res2)
{
struct aio_kiocb *iocb = container_of(kiocb, struct aio_kiocb, rw);
+ struct file *file = kiocb->ki_filp;
if (kiocb->ki_flags & IOCB_WRITE) {
struct inode *inode = file_inode(kiocb->ki_filp);
@@ -1397,8 +1428,8 @@ static void aio_complete_rw(struct kiocb *kiocb, long res, long res2)
file_end_write(kiocb->ki_filp);
}
- fput(kiocb->ki_filp);
- aio_complete(iocb, res, res2);
+ if (aio_complete(iocb, res, res2, 0))
+ fput(file);
}
static int aio_prep_rw(struct kiocb *req, struct iocb *iocb)
@@ -1541,11 +1572,13 @@ static ssize_t aio_write(struct kiocb *req, struct iocb *iocb, bool vectored,
static void aio_fsync_work(struct work_struct *work)
{
struct fsync_iocb *req = container_of(work, struct fsync_iocb, work);
+ struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, fsync);
+ struct file *file = req->file;
int ret;
ret = vfs_fsync(req->file, req->datasync);
- fput(req->file);
- aio_complete(container_of(req, struct aio_kiocb, fsync), ret, 0);
+ if (aio_complete(iocb, ret, 0, 0))
+ fput(file);
}
static int aio_fsync(struct fsync_iocb *req, struct iocb *iocb, bool datasync)
@@ -1807,8 +1840,8 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
{
struct kioctx *ctx;
struct aio_kiocb *kiocb;
+ int ret = -EINVAL;
u32 key;
- int ret;
ret = get_user(key, &iocb->aio_key);
if (unlikely(ret))
@@ -1819,15 +1852,19 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
return -EINVAL;
spin_lock_irq(&ctx->ctx_lock);
-
kiocb = lookup_kiocb(ctx, iocb, key);
- if (kiocb)
- ret = kiocb_cancel(kiocb);
- else
- ret = -EINVAL;
-
+ if (kiocb) {
+ if (kiocb->flags & AIO_IOCB_DELAYED_CANCEL) {
+ kiocb->flags |= AIO_IOCB_CANCELLED;
+ } else {
+ ret = kiocb_cancel(kiocb);
+ kiocb = NULL;
+ }
+ }
spin_unlock_irq(&ctx->ctx_lock);
+ if (kiocb)
+ ret = kiocb_cancel(kiocb);
if (!ret) {
/*
* The result argument is no longer used - the io_event is
--
2.14.2