author     Linus Torvalds <torvalds@linux-foundation.org>  2021-01-10 12:39:38 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2021-01-10 12:39:38 -0800
commit     d430adfea8d2c5baa186cabb130235f72fecbd5b (patch)
tree       3a40c99dcabb507fbce77ca5c71b4a27bad7d98f
parent     28318f53503090fcd8fd27c49445396ea2ace44b (diff)
parent     d9d05217cb6990b9a56e13b56e7a1b71e2551f6c (diff)
download   linux-stericsson-d430adfea8d2c5baa186cabb130235f72fecbd5b.tar.gz
Merge tag 'io_uring-5.11-2021-01-10' of git://git.kernel.dk/linux-block

Pull io_uring fixes from Jens Axboe:
 "A bit larger than I had hoped at this point, but it's all changes
  that will be directed towards stable anyway. In detail:

   - Fix a merge window regression on error return (Matthew)

   - Remove useless variable declaration/assignment (Ye Bin)

   - IOPOLL fixes (Pavel)

   - Exit and cancelation fixes (Pavel)

   - fasync lockdep complaint fix (Pavel)

   - Ensure SQPOLL is synchronized with creator life time (Pavel)"

* tag 'io_uring-5.11-2021-01-10' of git://git.kernel.dk/linux-block:
  io_uring: stop SQPOLL submit on creator's death
  io_uring: add warn_once for io_uring_flush()
  io_uring: inline io_uring_attempt_task_drop()
  io_uring: io_rw_reissue lockdep annotations
  io_uring: synchronise ev_posted() with waitqueues
  io_uring: dont kill fasync under completion_lock
  io_uring: trigger eventfd for IOPOLL
  io_uring: Fix return value from alloc_fixed_file_ref_node
  io_uring: Delete useless variable ‘id’ in io_prep_async_work
  io_uring: cancel more aggressively in exit_work
  io_uring: drop file refs after task cancel
  io_uring: patch up IOPOLL overflow_flush sync
  io_uring: synchronise IOPOLL on task_submit fail
-rw-r--r--  fs/io_uring.c  256
1 file changed, 167 insertions(+), 89 deletions(-)
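
One user-visible effect of the SQPOLL creator-lifetime change in the diff below is that io_uring_enter(2) on an SQPOLL ring now fails with -EOWNERDEAD once the task that created the ring has died. A minimal userspace sketch of checking for that return (hypothetical example using a raw syscall wrapper; not part of this commit):

/* Hypothetical sketch: handle -EOWNERDEAD from io_uring_enter() on an
 * SQPOLL ring whose creator task has exited. Not part of this patch. */
#include <errno.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

static int sys_io_uring_enter(int fd, unsigned to_submit,
			      unsigned min_complete, unsigned flags)
{
	/* raw syscall wrapper; libc returns -1 and sets errno on failure */
	return syscall(__NR_io_uring_enter, fd, to_submit, min_complete,
		       flags, NULL, 0);
}

static int wait_cqes(int ring_fd, unsigned want)
{
	int ret = sys_io_uring_enter(ring_fd, 0, want, IORING_ENTER_GETEVENTS);

	if (ret < 0 && errno == EOWNERDEAD) {
		/* SQPOLL submitter is gone; the ring can no longer submit */
		fprintf(stderr, "io_uring: SQPOLL creator died, ring is dead\n");
		return -EOWNERDEAD;
	}
	return ret;
}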
diff --git a/fs/io_uring.c b/fs/io_uring.c
index ca46f314640b..2f305c097bd5 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -262,6 +262,7 @@ struct io_ring_ctx {
unsigned int drain_next: 1;
unsigned int eventfd_async: 1;
unsigned int restricted: 1;
+ unsigned int sqo_dead: 1;
/*
* Ring buffer of indices into array of io_uring_sqe, which is
@@ -992,6 +993,9 @@ enum io_mem_account {
ACCT_PINNED,
};
+static void __io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
+ struct task_struct *task);
+
static void destroy_fixed_file_ref_node(struct fixed_file_ref_node *ref_node);
static struct fixed_file_ref_node *alloc_fixed_file_ref_node(
struct io_ring_ctx *ctx);
@@ -1342,11 +1346,6 @@ static void __io_commit_cqring(struct io_ring_ctx *ctx)
/* order cqe stores with ring update */
smp_store_release(&rings->cq.tail, ctx->cached_cq_tail);
-
- if (wq_has_sleeper(&ctx->cq_wait)) {
- wake_up_interruptible(&ctx->cq_wait);
- kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
- }
}
static void io_put_identity(struct io_uring_task *tctx, struct io_kiocb *req)
@@ -1520,10 +1519,8 @@ static void io_prep_async_work(struct io_kiocb *req)
{
const struct io_op_def *def = &io_op_defs[req->opcode];
struct io_ring_ctx *ctx = req->ctx;
- struct io_identity *id;
io_req_init_async(req);
- id = req->work.identity;
if (req->flags & REQ_F_FORCE_ASYNC)
req->work.flags |= IO_WQ_WORK_CONCURRENT;
@@ -1704,18 +1701,42 @@ static inline unsigned __io_cqring_events(struct io_ring_ctx *ctx)
static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
{
+ /* see waitqueue_active() comment */
+ smp_mb();
+
if (waitqueue_active(&ctx->wait))
wake_up(&ctx->wait);
if (ctx->sq_data && waitqueue_active(&ctx->sq_data->wait))
wake_up(&ctx->sq_data->wait);
if (io_should_trigger_evfd(ctx))
eventfd_signal(ctx->cq_ev_fd, 1);
+ if (waitqueue_active(&ctx->cq_wait)) {
+ wake_up_interruptible(&ctx->cq_wait);
+ kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
+ }
+}
+
+static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
+{
+ /* see waitqueue_active() comment */
+ smp_mb();
+
+ if (ctx->flags & IORING_SETUP_SQPOLL) {
+ if (waitqueue_active(&ctx->wait))
+ wake_up(&ctx->wait);
+ }
+ if (io_should_trigger_evfd(ctx))
+ eventfd_signal(ctx->cq_ev_fd, 1);
+ if (waitqueue_active(&ctx->cq_wait)) {
+ wake_up_interruptible(&ctx->cq_wait);
+ kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
+ }
}
/* Returns true if there are no backlogged entries after the flush */
-static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
- struct task_struct *tsk,
- struct files_struct *files)
+static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
+ struct task_struct *tsk,
+ struct files_struct *files)
{
struct io_rings *rings = ctx->rings;
struct io_kiocb *req, *tmp;
@@ -1768,6 +1789,20 @@ static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
return all_flushed;
}
+static void io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
+ struct task_struct *tsk,
+ struct files_struct *files)
+{
+ if (test_bit(0, &ctx->cq_check_overflow)) {
+ /* iopoll syncs against uring_lock, not completion_lock */
+ if (ctx->flags & IORING_SETUP_IOPOLL)
+ mutex_lock(&ctx->uring_lock);
+ __io_cqring_overflow_flush(ctx, force, tsk, files);
+ if (ctx->flags & IORING_SETUP_IOPOLL)
+ mutex_unlock(&ctx->uring_lock);
+ }
+}
+
static void __io_cqring_fill_event(struct io_kiocb *req, long res, long cflags)
{
struct io_ring_ctx *ctx = req->ctx;
@@ -2127,14 +2162,14 @@ static void __io_req_task_submit(struct io_kiocb *req)
{
struct io_ring_ctx *ctx = req->ctx;
- if (!__io_sq_thread_acquire_mm(ctx) &&
- !__io_sq_thread_acquire_files(ctx)) {
- mutex_lock(&ctx->uring_lock);
+ mutex_lock(&ctx->uring_lock);
+ if (!ctx->sqo_dead &&
+ !__io_sq_thread_acquire_mm(ctx) &&
+ !__io_sq_thread_acquire_files(ctx))
__io_queue_sqe(req, NULL);
- mutex_unlock(&ctx->uring_lock);
- } else {
+ else
__io_req_task_cancel(req, -EFAULT);
- }
+ mutex_unlock(&ctx->uring_lock);
}
static void io_req_task_submit(struct callback_head *cb)
@@ -2313,20 +2348,8 @@ static void io_double_put_req(struct io_kiocb *req)
io_free_req(req);
}
-static unsigned io_cqring_events(struct io_ring_ctx *ctx, bool noflush)
+static unsigned io_cqring_events(struct io_ring_ctx *ctx)
{
- if (test_bit(0, &ctx->cq_check_overflow)) {
- /*
- * noflush == true is from the waitqueue handler, just ensure
- * we wake up the task, and the next invocation will flush the
- * entries. We cannot safely to it from here.
- */
- if (noflush)
- return -1U;
-
- io_cqring_overflow_flush(ctx, false, NULL, NULL);
- }
-
/* See comment at the top of this file */
smp_rmb();
return __io_cqring_events(ctx);
@@ -2424,8 +2447,7 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
}
io_commit_cqring(ctx);
- if (ctx->flags & IORING_SETUP_SQPOLL)
- io_cqring_ev_posted(ctx);
+ io_cqring_ev_posted_iopoll(ctx);
io_req_free_batch_finish(ctx, &rb);
if (!list_empty(&again))
@@ -2551,7 +2573,9 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
* If we do, we can potentially be spinning for commands that
* already triggered a CQE (eg in error).
*/
- if (io_cqring_events(ctx, false))
+ if (test_bit(0, &ctx->cq_check_overflow))
+ __io_cqring_overflow_flush(ctx, false, NULL, NULL);
+ if (io_cqring_events(ctx))
break;
/*
@@ -2668,6 +2692,8 @@ static bool io_rw_reissue(struct io_kiocb *req, long res)
if ((res != -EAGAIN && res != -EOPNOTSUPP) || io_wq_current_is_worker())
return false;
+ lockdep_assert_held(&req->ctx->uring_lock);
+
ret = io_sq_thread_acquire_mm_files(req->ctx, req);
if (io_resubmit_prep(req, ret)) {
@@ -6826,7 +6852,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
/* if we have a backlog and couldn't flush it all, return BUSY */
if (test_bit(0, &ctx->sq_check_overflow)) {
- if (!io_cqring_overflow_flush(ctx, false, NULL, NULL))
+ if (!__io_cqring_overflow_flush(ctx, false, NULL, NULL))
return -EBUSY;
}
@@ -6928,7 +6954,8 @@ static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
if (!list_empty(&ctx->iopoll_list))
io_do_iopoll(ctx, &nr_events, 0);
- if (to_submit && likely(!percpu_ref_is_dying(&ctx->refs)))
+ if (to_submit && !ctx->sqo_dead &&
+ likely(!percpu_ref_is_dying(&ctx->refs)))
ret = io_submit_sqes(ctx, to_submit);
mutex_unlock(&ctx->uring_lock);
}
@@ -7089,7 +7116,7 @@ struct io_wait_queue {
unsigned nr_timeouts;
};
-static inline bool io_should_wake(struct io_wait_queue *iowq, bool noflush)
+static inline bool io_should_wake(struct io_wait_queue *iowq)
{
struct io_ring_ctx *ctx = iowq->ctx;
@@ -7098,7 +7125,7 @@ static inline bool io_should_wake(struct io_wait_queue *iowq, bool noflush)
* started waiting. For timeouts, we always want to return to userspace,
* regardless of event count.
*/
- return io_cqring_events(ctx, noflush) >= iowq->to_wait ||
+ return io_cqring_events(ctx) >= iowq->to_wait ||
atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
}
@@ -7108,11 +7135,13 @@ static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
wq);
- /* use noflush == true, as we can't safely rely on locking context */
- if (!io_should_wake(iowq, true))
- return -1;
-
- return autoremove_wake_function(curr, mode, wake_flags, key);
+ /*
+ * Cannot safely flush overflowed CQEs from here, ensure we wake up
+ * the task, and the next invocation will do it.
+ */
+ if (io_should_wake(iowq) || test_bit(0, &iowq->ctx->cq_check_overflow))
+ return autoremove_wake_function(curr, mode, wake_flags, key);
+ return -1;
}
static int io_run_task_work_sig(void)
@@ -7149,7 +7178,8 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
int ret = 0;
do {
- if (io_cqring_events(ctx, false) >= min_events)
+ io_cqring_overflow_flush(ctx, false, NULL, NULL);
+ if (io_cqring_events(ctx) >= min_events)
return 0;
if (!io_run_task_work())
break;
@@ -7177,6 +7207,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
trace_io_uring_cqring_wait(ctx, min_events);
do {
+ io_cqring_overflow_flush(ctx, false, NULL, NULL);
prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
TASK_INTERRUPTIBLE);
/* make sure we run task_work before checking for signals */
@@ -7185,8 +7216,10 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
continue;
else if (ret < 0)
break;
- if (io_should_wake(&iowq, false))
+ if (io_should_wake(&iowq))
break;
+ if (test_bit(0, &ctx->cq_check_overflow))
+ continue;
if (uts) {
timeout = schedule_timeout(timeout);
if (timeout == 0) {
@@ -7684,12 +7717,12 @@ static struct fixed_file_ref_node *alloc_fixed_file_ref_node(
ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
if (!ref_node)
- return ERR_PTR(-ENOMEM);
+ return NULL;
if (percpu_ref_init(&ref_node->refs, io_file_data_ref_zero,
0, GFP_KERNEL)) {
kfree(ref_node);
- return ERR_PTR(-ENOMEM);
+ return NULL;
}
INIT_LIST_HEAD(&ref_node->node);
INIT_LIST_HEAD(&ref_node->file_list);
@@ -7783,9 +7816,9 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
}
ref_node = alloc_fixed_file_ref_node(ctx);
- if (IS_ERR(ref_node)) {
+ if (!ref_node) {
io_sqe_files_unregister(ctx);
- return PTR_ERR(ref_node);
+ return -ENOMEM;
}
io_sqe_files_set_node(file_data, ref_node);
@@ -7885,8 +7918,8 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
return -EINVAL;
ref_node = alloc_fixed_file_ref_node(ctx);
- if (IS_ERR(ref_node))
- return PTR_ERR(ref_node);
+ if (!ref_node)
+ return -ENOMEM;
done = 0;
fds = u64_to_user_ptr(up->fds);
@@ -8624,7 +8657,8 @@ static __poll_t io_uring_poll(struct file *file, poll_table *wait)
smp_rmb();
if (!io_sqring_full(ctx))
mask |= EPOLLOUT | EPOLLWRNORM;
- if (io_cqring_events(ctx, false))
+ io_cqring_overflow_flush(ctx, false, NULL, NULL);
+ if (io_cqring_events(ctx))
mask |= EPOLLIN | EPOLLRDNORM;
return mask;
@@ -8663,7 +8697,7 @@ static void io_ring_exit_work(struct work_struct *work)
* as nobody else will be looking for them.
*/
do {
- io_iopoll_try_reap_events(ctx);
+ __io_uring_cancel_task_requests(ctx, NULL);
} while (!wait_for_completion_timeout(&ctx->ref_comp, HZ/20));
io_ring_ctx_free(ctx);
}
@@ -8679,10 +8713,14 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
{
mutex_lock(&ctx->uring_lock);
percpu_ref_kill(&ctx->refs);
+
+ if (WARN_ON_ONCE((ctx->flags & IORING_SETUP_SQPOLL) && !ctx->sqo_dead))
+ ctx->sqo_dead = 1;
+
/* if force is set, the ring is going away. always drop after that */
ctx->cq_overflow_flushed = 1;
if (ctx->rings)
- io_cqring_overflow_flush(ctx, true, NULL, NULL);
+ __io_cqring_overflow_flush(ctx, true, NULL, NULL);
mutex_unlock(&ctx->uring_lock);
io_kill_timeouts(ctx, NULL, NULL);
@@ -8818,9 +8856,11 @@ static void __io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
enum io_wq_cancel cret;
bool ret = false;
- cret = io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb, &cancel, true);
- if (cret != IO_WQ_CANCEL_NOTFOUND)
- ret = true;
+ if (ctx->io_wq) {
+ cret = io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb,
+ &cancel, true);
+ ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
+ }
/* SQPOLL thread does its own polling */
if (!(ctx->flags & IORING_SETUP_SQPOLL)) {
@@ -8839,6 +8879,18 @@ static void __io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
}
}
+static void io_disable_sqo_submit(struct io_ring_ctx *ctx)
+{
+ WARN_ON_ONCE(ctx->sqo_task != current);
+
+ mutex_lock(&ctx->uring_lock);
+ ctx->sqo_dead = 1;
+ mutex_unlock(&ctx->uring_lock);
+
+ /* make sure callers enter the ring to get error */
+ io_ring_set_wakeup_flag(ctx);
+}
+
/*
* We need to iteratively cancel requests, in case a request has dependent
* hard links. These persist even for failure of cancelations, hence keep
@@ -8850,15 +8902,15 @@ static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
struct task_struct *task = current;
if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) {
+ /* for SQPOLL only sqo_task has task notes */
+ io_disable_sqo_submit(ctx);
task = ctx->sq_data->thread;
atomic_inc(&task->io_uring->in_idle);
io_sq_thread_park(ctx->sq_data);
}
io_cancel_defer_files(ctx, task, files);
- io_ring_submit_lock(ctx, (ctx->flags & IORING_SETUP_IOPOLL));
io_cqring_overflow_flush(ctx, true, task, files);
- io_ring_submit_unlock(ctx, (ctx->flags & IORING_SETUP_IOPOLL));
if (!files)
__io_uring_cancel_task_requests(ctx, task);
@@ -8931,20 +8983,12 @@ static void io_uring_del_task_file(struct file *file)
fput(file);
}
-/*
- * Drop task note for this file if we're the only ones that hold it after
- * pending fput()
- */
-static void io_uring_attempt_task_drop(struct file *file)
+static void io_uring_remove_task_files(struct io_uring_task *tctx)
{
- if (!current->io_uring)
- return;
- /*
- * fput() is pending, will be 2 if the only other ref is our potential
- * task file note. If the task is exiting, drop regardless of count.
- */
- if (fatal_signal_pending(current) || (current->flags & PF_EXITING) ||
- atomic_long_read(&file->f_count) == 2)
+ struct file *file;
+ unsigned long index;
+
+ xa_for_each(&tctx->xa, index, file)
io_uring_del_task_file(file);
}
@@ -8956,16 +9000,12 @@ void __io_uring_files_cancel(struct files_struct *files)
/* make sure overflow events are dropped */
atomic_inc(&tctx->in_idle);
-
- xa_for_each(&tctx->xa, index, file) {
- struct io_ring_ctx *ctx = file->private_data;
-
- io_uring_cancel_task_requests(ctx, files);
- if (files)
- io_uring_del_task_file(file);
- }
-
+ xa_for_each(&tctx->xa, index, file)
+ io_uring_cancel_task_requests(file->private_data, files);
atomic_dec(&tctx->in_idle);
+
+ if (files)
+ io_uring_remove_task_files(tctx);
}
static s64 tctx_inflight(struct io_uring_task *tctx)
@@ -9028,11 +9068,39 @@ void __io_uring_task_cancel(void)
} while (1);
atomic_dec(&tctx->in_idle);
+
+ io_uring_remove_task_files(tctx);
}
static int io_uring_flush(struct file *file, void *data)
{
- io_uring_attempt_task_drop(file);
+ struct io_uring_task *tctx = current->io_uring;
+ struct io_ring_ctx *ctx = file->private_data;
+
+ if (!tctx)
+ return 0;
+
+ /* we should have cancelled and erased it before PF_EXITING */
+ WARN_ON_ONCE((current->flags & PF_EXITING) &&
+ xa_load(&tctx->xa, (unsigned long)file));
+
+ /*
+ * fput() is pending, will be 2 if the only other ref is our potential
+ * task file note. If the task is exiting, drop regardless of count.
+ */
+ if (atomic_long_read(&file->f_count) != 2)
+ return 0;
+
+ if (ctx->flags & IORING_SETUP_SQPOLL) {
+ /* there is only one file note, which is owned by sqo_task */
+ WARN_ON_ONCE((ctx->sqo_task == current) ==
+ !xa_load(&tctx->xa, (unsigned long)file));
+
+ io_disable_sqo_submit(ctx);
+ }
+
+ if (!(ctx->flags & IORING_SETUP_SQPOLL) || ctx->sqo_task == current)
+ io_uring_del_task_file(file);
return 0;
}
@@ -9106,8 +9174,9 @@ static unsigned long io_uring_nommu_get_unmapped_area(struct file *file,
#endif /* !CONFIG_MMU */
-static void io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
+static int io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
{
+ int ret = 0;
DEFINE_WAIT(wait);
do {
@@ -9116,6 +9185,11 @@ static void io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
prepare_to_wait(&ctx->sqo_sq_wait, &wait, TASK_INTERRUPTIBLE);
+ if (unlikely(ctx->sqo_dead)) {
+ ret = -EOWNERDEAD;
+ goto out;
+ }
+
if (!io_sqring_full(ctx))
break;
@@ -9123,6 +9197,8 @@ static void io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
} while (!signal_pending(current));
finish_wait(&ctx->sqo_sq_wait, &wait);
+out:
+ return ret;
}
static int io_get_ext_arg(unsigned flags, const void __user *argp, size_t *argsz,
@@ -9194,17 +9270,18 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
*/
ret = 0;
if (ctx->flags & IORING_SETUP_SQPOLL) {
- if (!list_empty_careful(&ctx->cq_overflow_list)) {
- bool needs_lock = ctx->flags & IORING_SETUP_IOPOLL;
+ io_cqring_overflow_flush(ctx, false, NULL, NULL);
- io_ring_submit_lock(ctx, needs_lock);
- io_cqring_overflow_flush(ctx, false, NULL, NULL);
- io_ring_submit_unlock(ctx, needs_lock);
- }
+ ret = -EOWNERDEAD;
+ if (unlikely(ctx->sqo_dead))
+ goto out;
if (flags & IORING_ENTER_SQ_WAKEUP)
wake_up(&ctx->sq_data->wait);
- if (flags & IORING_ENTER_SQ_WAIT)
- io_sqpoll_wait_sq(ctx);
+ if (flags & IORING_ENTER_SQ_WAIT) {
+ ret = io_sqpoll_wait_sq(ctx);
+ if (ret)
+ goto out;
+ }
submitted = to_submit;
} else if (to_submit) {
ret = io_uring_add_task_file(ctx, f.file);
@@ -9631,6 +9708,7 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p,
trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
return ret;
err:
+ io_disable_sqo_submit(ctx);
io_ring_ctx_wait_and_kill(ctx);
return ret;
}