From 8b0d8f2e723bcf52d010c46130eb759770a0dc11 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig <hch@lst.de>
Date: Mon, 19 Nov 2018 08:13:19 +0100
Subject: aio: split get_reqs_available from aio_get_req

This makes the polled case nice to handle, and matches the put side.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 fs/aio.c | 41 ++++++++++++++++++++++-------------------
 1 file changed, 22 insertions(+), 19 deletions(-)

diff --git a/fs/aio.c b/fs/aio.c
index e02085fe10d7..348f04129035 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -935,7 +935,7 @@ static void put_reqs_available(struct kioctx *ctx, unsigned nr)
 	local_irq_restore(flags);
 }
 
-static bool get_reqs_available(struct kioctx *ctx)
+static bool __get_reqs_available(struct kioctx *ctx)
 {
 	struct kioctx_cpu *kcpu;
 	bool ret = false;
@@ -1027,23 +1027,25 @@ static void user_refill_reqs_available(struct kioctx *ctx)
 	spin_unlock_irq(&ctx->completion_lock);
 }
 
+static bool get_reqs_available(struct kioctx *ctx)
+{
+	if (__get_reqs_available(ctx))
+		return true;
+	user_refill_reqs_available(ctx);
+	return __get_reqs_available(ctx);
+}
+
 /* aio_get_req
  *	Allocate a slot for an aio request.
  * Returns NULL if no requests are free.
  */
-static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx, bool needs_ring)
+static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx)
 {
 	struct aio_kiocb *req;
 
-	if (needs_ring && !get_reqs_available(ctx)) {
-		user_refill_reqs_available(ctx);
-		if (!get_reqs_available(ctx))
-			return NULL;
-	}
-
 	req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL|__GFP_ZERO);
 	if (unlikely(!req))
-		goto out_put;
+		return NULL;
 
 	percpu_ref_get(&ctx->reqs);
 	INIT_LIST_HEAD(&req->ki_list);
@@ -1051,10 +1053,6 @@ static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx, bool needs_ring)
 	refcount_set(&req->ki_refcnt, 0);
 	req->ki_ctx = ctx;
 	return req;
-out_put:
-	if (needs_ring)
-		put_reqs_available(ctx, 1);
-	return NULL;
 }
 
 static struct kioctx *lookup_ioctx(unsigned long ctx_id)
@@ -2200,17 +2198,21 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
 		return -EINVAL;
 	}
 
-	if (iocb.aio_flags & IOCB_FLAG_HIPRI)
+	if (iocb.aio_flags & IOCB_FLAG_HIPRI) {
 		ctx_type = CTX_TYPE_POLLED;
-	else
+	} else {
 		ctx_type = CTX_TYPE_NORMAL;
+		if (!get_reqs_available(ctx))
+			return -EAGAIN;
+	}
 
 	/*
 	 * Polled IO doesn't need ring reservations
 	 */
-	req = aio_get_req(ctx, ctx_type == CTX_TYPE_NORMAL);
+	ret = -EAGAIN;
+	req = aio_get_req(ctx);
 	if (unlikely(!req))
-		return -EAGAIN;
+		goto out_put_reqs_available;
 
 	if (iocb.aio_flags & IOCB_FLAG_RESFD) {
 		/*
@@ -2294,12 +2296,13 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
 	return 0;
 
 out_put_req:
-	if (ctx_type == CTX_TYPE_NORMAL)
-		put_reqs_available(ctx, 1);
 	percpu_ref_put(&ctx->reqs);
 	if (req->ki_eventfd)
 		eventfd_ctx_put(req->ki_eventfd);
 	kmem_cache_free(kiocb_cachep, req);
+out_put_reqs_available:
+	if (ctx_type == CTX_TYPE_NORMAL)
+		put_reqs_available(ctx, 1);
 	return ret;
 }
-- 
2.19.1