author     Jens Axboe <axboe@kernel.dk>    2025-07-15 12:20:06 -0600
committer  Jens Axboe <axboe@kernel.dk>    2025-07-15 12:20:06 -0600
commit     8723c146ad4ca17d340213f3676ce1829668b79b (patch)
tree       b4834cd583a879c6ff442b435fccf310f9951ddc /io_uring/io_uring.h
parent     b1915b18e1d00eb4e8babcdc2ca3a64b43e20e9a (diff)
io_uring: deduplicate wakeup handling

Both io_poll_wq_wake() and io_cqring_wake() contain the exact same code,
and most of the comment in the latter applies equally to both. Move the
test and wakeup handling into a basic helper that they can both use, and
move part of the comment that applies generically to this new helper.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
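For readers skimming the change, this is the post-patch shape of the three
helpers, condensed from the diff below (the moved comment is trimmed here;
see the diff for the verbatim text):

    static inline void __io_wq_wake(struct wait_queue_head *wq)
    {
    	/* Only issue the wakeup if someone is actually sleeping on wq */
    	if (wq_has_sleeper(wq))
    		__wake_up(wq, TASK_NORMAL, 0, poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
    }

    static inline void io_poll_wq_wake(struct io_ring_ctx *ctx)
    {
    	__io_wq_wake(&ctx->poll_wq);
    }

    static inline void io_cqring_wake(struct io_ring_ctx *ctx)
    {
    	__io_wq_wake(&ctx->cq_wait);
    }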
Diffstat (limited to 'io_uring/io_uring.h')
-rw-r--r--    io_uring/io_uring.h    27
1 file changed, 16 insertions, 11 deletions
diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h
index dc17162e7af1..abc6de227f74 100644
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -294,11 +294,22 @@ static inline void io_commit_cqring(struct io_ring_ctx *ctx)
 	smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
 }
 
+static inline void __io_wq_wake(struct wait_queue_head *wq)
+{
+	/*
+	 *
+	 * Pass in EPOLLIN|EPOLL_URING_WAKE as the poll wakeup key. The latter
+	 * set in the mask so that if we recurse back into our own poll
+	 * waitqueue handlers, we know we have a dependency between eventfd or
+	 * epoll and should terminate multishot poll at that point.
+	 */
+	if (wq_has_sleeper(wq))
+		__wake_up(wq, TASK_NORMAL, 0, poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
+}
+
 static inline void io_poll_wq_wake(struct io_ring_ctx *ctx)
 {
-	if (wq_has_sleeper(&ctx->poll_wq))
-		__wake_up(&ctx->poll_wq, TASK_NORMAL, 0,
-				poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
+	__io_wq_wake(&ctx->poll_wq);
 }
 
 static inline void io_cqring_wake(struct io_ring_ctx *ctx)
@@ -307,15 +318,9 @@ static inline void io_cqring_wake(struct io_ring_ctx *ctx)
 	 * Trigger waitqueue handler on all waiters on our waitqueue. This
 	 * won't necessarily wake up all the tasks, io_should_wake() will make
 	 * that decision.
-	 *
-	 * Pass in EPOLLIN|EPOLL_URING_WAKE as the poll wakeup key. The latter
-	 * set in the mask so that if we recurse back into our own poll
-	 * waitqueue handlers, we know we have a dependency between eventfd or
-	 * epoll and should terminate multishot poll at that point.
 	 */
-	if (wq_has_sleeper(&ctx->cq_wait))
-		__wake_up(&ctx->cq_wait, TASK_NORMAL, 0,
-				poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
+
+	__io_wq_wake(&ctx->cq_wait);
 }
 
 static inline bool io_sqring_full(struct io_ring_ctx *ctx)
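As context for how these helpers get used, here is a minimal, hypothetical
sketch of a completion path that pairs io_commit_cqring() with
io_cqring_wake(). The real posting paths in io_uring.c handle deferred
flushing and eventfd signalling as well; example_post_cqe() below is made
up purely for illustration:

    /* Hypothetical caller: publish pending CQEs, then wake any waiters. */
    static void example_post_cqe(struct io_ring_ctx *ctx)
    {
    	spin_lock(&ctx->completion_lock);
    	/* ... fill in a CQE at the cached tail ... */
    	io_commit_cqring(ctx);		/* release-store of cq.tail */
    	spin_unlock(&ctx->completion_lock);

    	/* Only calls __wake_up() if wq_has_sleeper() sees a waiter */
    	io_cqring_wake(ctx);
    }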