summaryrefslogtreecommitdiff
path: root/io_uring/tw.c
diff options
context:
space:
mode:
Diffstat (limited to 'io_uring/tw.c')
-rw-r--r--io_uring/tw.c22
1 file changed, 20 insertions(+), 2 deletions(-)
diff --git a/io_uring/tw.c b/io_uring/tw.c
index 1ee2b8ab07c8..2f2b4ac4b126 100644
--- a/io_uring/tw.c
+++ b/io_uring/tw.c
@@ -152,6 +152,21 @@ void tctx_task_work(struct callback_head *cb)
WARN_ON_ONCE(ret);
}
+/*
+ * Sets IORING_SQ_TASKRUN in the sq_flags shared with userspace, using the
+ * RCU protected rings pointer to be safe against concurrent ring resizing.
+ */
+static void io_ctx_mark_taskrun(struct io_ring_ctx *ctx)
+{
+ lockdep_assert_in_rcu_read_lock();
+
+ if (ctx->flags & IORING_SETUP_TASKRUN_FLAG) {
+ struct io_rings *rings = rcu_dereference(ctx->rings_rcu);
+
+ atomic_or(IORING_SQ_TASKRUN, &rings->sq_flags);
+ }
+}
+
void io_req_local_work_add(struct io_kiocb *req, unsigned flags)
{
struct io_ring_ctx *ctx = req->ctx;
@@ -206,8 +221,7 @@ void io_req_local_work_add(struct io_kiocb *req, unsigned flags)
*/
if (!head) {
- if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
- atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
+ io_ctx_mark_taskrun(ctx);
if (ctx->has_evfd)
io_eventfd_signal(ctx, false);
}
@@ -231,6 +245,10 @@ void io_req_normal_work_add(struct io_kiocb *req)
if (!llist_add(&req->io_task_work.node, &tctx->task_list))
return;
+ /*
+ * Doesn't need to use ->rings_rcu, as resizing isn't supported for
+ * !DEFER_TASKRUN.
+ */
if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);