author     Jens Axboe <axboe@kernel.dk>  2021-03-03 15:47:04 -0700
committer  Jens Axboe <axboe@kernel.dk>  2021-03-04 06:38:10 -0700
commit     f01272541d2cd7b7f24909d63ea2b028a6a66293 (patch)
tree       94b31ecf630b34f0bb9c686aced0dabedaac470a
parent     e4b4a13f494120c475580927864cc1dd96f595d1 (diff)
download   linux-stericsson-f01272541d2cd7b7f24909d63ea2b028a6a66293.tar.gz
io-wq: ensure all pending work is canceled on exit
If we race on shutting down the io-wq, then we should ensure that any
work queued after the workers have shut down is canceled. Harden the
add-work check a bit too, checking for IO_WQ_BIT_EXIT and canceling the
work if it's set. Add a WARN_ON() for having any work left before we
kill the io-wq context.

Reported-by: syzbot+91b4b56ead187d35c9d3@syzkaller.appspotmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r--	fs/io-wq.c	42
1 file changed, 33 insertions(+), 9 deletions(-)
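To make the pattern concrete before the diff itself, here is a minimal user-space C sketch of what the patch does: refuse (cancel) work enqueued once an exit flag is set, and sweep everything still pending with a match-all callback on shutdown. All names here (struct wq, enqueue, cancel_pending, match_all) are illustrative stand-ins, not kernel API; the kernel versions additionally handle locking, work hashing, and per-node queues that the sketch omits.

/*
 * Minimal sketch (not kernel code) of the cancel-on-exit pattern:
 * refuse new work once an exit flag is set, and cancel everything
 * still pending with a match-all callback during shutdown.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct work {
	int id;
	struct work *next;
};

typedef bool (*work_cancel_fn)(struct work *work, void *data);

struct wq {
	struct work *head;
	bool exiting;		/* analogous to IO_WQ_BIT_EXIT */
};

/* counterpart of io_wq_work_match_all(): matches every work item */
static bool match_all(struct work *work, void *data)
{
	(void)work;
	(void)data;
	return true;
}

/* counterpart of io_wq_cancel_pending(): unlink and free matching work */
static void cancel_pending(struct wq *wq, work_cancel_fn match, void *data)
{
	struct work **pp = &wq->head;

	while (*pp) {
		struct work *w = *pp;

		if (match(w, data)) {
			*pp = w->next;	/* unlink and "cancel" */
			printf("canceled pending work %d\n", w->id);
			free(w);
			continue;
		}
		pp = &w->next;
	}
}

/* counterpart of the hardened io_wqe_enqueue(): cancel instead of queueing */
static bool enqueue(struct wq *wq, int id)
{
	struct work *w;

	if (wq->exiting) {
		printf("wq exiting, canceling work %d up front\n", id);
		return false;
	}
	w = malloc(sizeof(*w));
	w->id = id;
	w->next = wq->head;
	wq->head = w;
	return true;
}

int main(void)
{
	struct wq wq = { 0 };

	enqueue(&wq, 1);
	enqueue(&wq, 2);
	wq.exiting = true;	/* shutdown begins */
	enqueue(&wq, 3);	/* races with shutdown: canceled up front */
	cancel_pending(&wq, match_all, NULL);	/* shutdown sweep */
	return 0;
}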
diff --git a/fs/io-wq.c b/fs/io-wq.c
index acffc85d1a93..19f18389ead2 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -129,6 +129,17 @@ struct io_wq {
 
 static enum cpuhp_state io_wq_online;
 
+struct io_cb_cancel_data {
+	work_cancel_fn *fn;
+	void *data;
+	int nr_running;
+	int nr_pending;
+	bool cancel_all;
+};
+
+static void io_wqe_cancel_pending_work(struct io_wqe *wqe,
+				       struct io_cb_cancel_data *match);
+
 static bool io_worker_get(struct io_worker *worker)
 {
 	return refcount_inc_not_zero(&worker->ref);
@@ -713,6 +724,23 @@ static void io_wq_check_workers(struct io_wq *wq)
 	}
 }
 
+static bool io_wq_work_match_all(struct io_wq_work *work, void *data)
+{
+	return true;
+}
+
+static void io_wq_cancel_pending(struct io_wq *wq)
+{
+	struct io_cb_cancel_data match = {
+		.fn		= io_wq_work_match_all,
+		.cancel_all	= true,
+	};
+	int node;
+
+	for_each_node(node)
+		io_wqe_cancel_pending_work(wq->wqes[node], &match);
+}
+
 /*
  * Manager thread. Tasked with creating new workers, if we need them.
  */
@@ -748,6 +776,8 @@ static int io_wq_manager(void *data)
 	/* we might not ever have created any workers */
 	if (atomic_read(&wq->worker_refs))
 		wait_for_completion(&wq->worker_done);
+
+	io_wq_cancel_pending(wq);
 	complete(&wq->exited);
 	do_exit(0);
 }
@@ -809,7 +839,8 @@ static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
 	unsigned long flags;
 
 	/* Can only happen if manager creation fails after exec */
-	if (unlikely(io_wq_fork_manager(wqe->wq))) {
+	if (io_wq_fork_manager(wqe->wq) ||
+	    test_bit(IO_WQ_BIT_EXIT, &wqe->wq->state)) {
 		work->flags |= IO_WQ_WORK_CANCEL;
 		wqe->wq->do_work(work);
 		return;
@@ -845,14 +876,6 @@ void io_wq_hash_work(struct io_wq_work *work, void *val)
 	work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT));
 }
 
-struct io_cb_cancel_data {
-	work_cancel_fn *fn;
-	void *data;
-	int nr_running;
-	int nr_pending;
-	bool cancel_all;
-};
-
 static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
 {
 	struct io_cb_cancel_data *match = data;
@@ -1086,6 +1109,7 @@ static void io_wq_destroy(struct io_wq *wq)
 		struct io_wqe *wqe = wq->wqes[node];
 
 		list_del_init(&wqe->wait.entry);
+		WARN_ON_ONCE(!wq_list_empty(&wqe->work_list));
 		kfree(wqe);
 	}
 	spin_unlock_irq(&wq->hash->wait.lock);
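Read together, the three changes close the shutdown race from both ends: io_wqe_enqueue() refuses new work once IO_WQ_BIT_EXIT is set, completing it through do_work() with IO_WQ_WORK_CANCEL rather than parking it on a list no worker will ever drain; the manager sweeps anything that slipped in before the flag was observed via io_wq_cancel_pending(); and the WARN_ON_ONCE() in io_wq_destroy() asserts the resulting invariant that every wqe's work_list is empty by teardown. Moving struct io_cb_cancel_data up in the file (rather than duplicating it) is what lets the new cancel helpers share the existing match machinery.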