path: root/include/linux/io_uring.h
diff options
author    Jens Axboe <axboe@kernel.dk>  2021-02-10 00:03:20 +0000
committer Jens Axboe <axboe@kernel.dk>  2021-02-10 07:28:43 -0700
commit    7cbf1722d5fc5779946ee8f338e9e38b5de15856 (patch)
tree      cafc7b4a2cc122789ca7c1b2045d44ec2a36847a /include/linux/io_uring.h
parent    1b4c351f6eb7467c77fc19e0cd7e5f0083ecd847 (diff)
io_uring: provide FIFO ordering for task_work
task_work is a LIFO list, due to how it's implemented as a lockless list. For long chains of task_work, this can be problematic as the first entry added is the last one processed. Similarly, we'd waste a lot of CPU cycles reversing this list. Wrap the task_work so we have a single task_work entry per task per ctx, and use that to run it in the right order.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'include/linux/io_uring.h')
1 file changed, 14 insertions(+), 0 deletions(-)
diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h
index 35b2d845704d..2eb6d19de336 100644
--- a/include/linux/io_uring.h
+++ b/include/linux/io_uring.h
@@ -22,6 +22,15 @@ struct io_identity {
refcount_t count;
+struct io_wq_work_node {
+ struct io_wq_work_node *next;
+struct io_wq_work_list {
+ struct io_wq_work_node *first;
+ struct io_wq_work_node *last;
struct io_uring_task {
/* submission side */
struct xarray xa;
@@ -32,6 +41,11 @@ struct io_uring_task {
struct io_identity *identity;
atomic_t in_idle;
bool sqpoll;
+ spinlock_t task_lock;
+ struct io_wq_work_list task_list;
+ unsigned long task_state;
+ struct callback_head task_work;
#if defined(CONFIG_IO_URING)