path: root/fs/aio.c
diff options
authorBenjamin LaHaise <bcrl@kvack.org>2014-04-29 12:45:17 -0400
committerBenjamin LaHaise <bcrl@kvack.org>2014-04-29 12:45:17 -0400
commitfb2d44838320b78e6e3b5eb2e35b70f62f262e4c (patch)
treebb88488c0bcb41cd7e1267afa85a5d8871eb37b3 /fs/aio.c
parentd52a8f9ead60338306c4f03e9ce575c5f23a4b65 (diff)
aio: report error from io_destroy() when threads race in io_destroy()
As reported by Anatol Pomozov, io_destroy() fails to report an error when it loses the race to destroy a given ioctx. Since there is a difference in behaviour between the thread that wins the race (which blocks on outstanding io requests) versus the thread that loses (which returns immediately), wire up a return code from kill_ioctx() to the io_destroy() syscall. Signed-off-by: Benjamin LaHaise <bcrl@kvack.org> Cc: Anatol Pomozov <anatol.pomozov@gmail.com>
Diffstat (limited to 'fs/aio.c')
1 files changed, 9 insertions, 7 deletions
diff --git a/fs/aio.c b/fs/aio.c
index 908006e8c7ff..044c1c86decc 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -727,7 +727,7 @@ err:
* when the processes owning a context have all exited to encourage
* the rapid destruction of the kioctx.
-static void kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
+static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
struct completion *requests_done)
if (!atomic_xchg(&ctx->dead, 1)) {
@@ -759,10 +759,10 @@ static void kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
ctx->requests_done = requests_done;
- } else {
- if (requests_done)
- complete(requests_done);
+ return 0;
+ return -EINVAL;
/* wait_on_sync_kiocb:
@@ -1219,21 +1219,23 @@ SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
if (likely(NULL != ioctx)) {
struct completion requests_done =
+ int ret;
/* Pass requests_done to kill_ioctx() where it can be set
* in a thread-safe way. If we try to set it here then we have
* a race condition if two io_destroy() called simultaneously.
- kill_ioctx(current->mm, ioctx, &requests_done);
+ ret = kill_ioctx(current->mm, ioctx, &requests_done);
/* Wait until all IO for the context are done. Otherwise kernel
* keep using user-space buffers even if user thinks the context
* is destroyed.
- wait_for_completion(&requests_done);
+ if (!ret)
+ wait_for_completion(&requests_done);
- return 0;
+ return ret;
pr_debug("EINVAL: io_destroy: invalid context id\n");
return -EINVAL;