io_uring: don't take ctx refs in task_work handler

__tctx_task_work() guarantees that ctx won't be killed while running
task_works, so we can remove the now-unnecessary ctx pinning for
internally armed polling.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
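
For illustration, here is a minimal userspace sketch of the lifetime rule
the patch relies on. None of it is kernel code: struct ctx,
ctx_get()/ctx_put() and run_task_works() are invented names, and the loop
only loosely models how __tctx_task_work() pins each ctx once per batch
of handlers.

/*
 * Illustrative sketch only -- plain userspace C, not the kernel
 * implementation. struct ctx, ctx_get()/ctx_put(), run_task_works()
 * and the task_work list below are invented for this example.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct ctx {
	atomic_int refs;	/* stands in for ctx->refs (a percpu_ref) */
};

struct task_work {
	struct ctx *ctx;
	void (*func)(struct task_work *tw);
	struct task_work *next;
};

static void ctx_get(struct ctx *ctx)
{
	atomic_fetch_add(&ctx->refs, 1);
}

static void ctx_put(struct ctx *ctx)
{
	/* free on the last reference, like a percpu_ref release */
	if (atomic_fetch_sub(&ctx->refs, 1) == 1)
		free(ctx);
}

/* A poll-completion handler: it touches ctx but, as after this patch,
 * takes no reference of its own. */
static void poll_task_func(struct task_work *tw)
{
	printf("handler ran under batch ref, refs=%d\n",
	       atomic_load(&tw->ctx->refs));
}

/*
 * The runner pins each ctx once per batch: every handler on the list
 * executes under that single reference, so per-request get/put pairs
 * (the lines this patch deletes) add nothing but refcount traffic.
 */
static void run_task_works(struct task_work *list)
{
	struct ctx *cur = NULL;

	while (list) {
		struct task_work *next = list->next;

		if (list->ctx != cur) {
			if (cur)
				ctx_put(cur);
			cur = list->ctx;
			ctx_get(cur);	/* one pin covers the whole run */
		}
		list->func(list);
		list = next;
	}
	if (cur)
		ctx_put(cur);
}

int main(void)
{
	struct ctx *ctx = malloc(sizeof(*ctx));

	atomic_init(&ctx->refs, 1);	/* the caller's reference */

	struct task_work tw2 = { .ctx = ctx, .func = poll_task_func };
	struct task_work tw1 = { .ctx = ctx, .func = poll_task_func,
				 .next = &tw2 };

	run_task_works(&tw1);	/* ctx stays alive across both handlers */
	ctx_put(ctx);		/* drop the last reference, frees ctx */
	return 0;
}

With the batch-level pin in place, the per-request
percpu_ref_get()/percpu_ref_put() pair removed below is pure refcounting
overhead.
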
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 962a358..12e2ec7 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -4882,7 +4882,6 @@ static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
 
 	req->result = mask;
 	req->task_work.func = func;
-	percpu_ref_get(&req->ctx->refs);
 
 	/*
 	 * If this fails, then the task is exiting. When a task exits, the
@@ -4982,8 +4981,6 @@ static void io_poll_task_func(struct callback_head *cb)
 		if (nxt)
 			__io_req_task_submit(nxt);
 	}
-
-	percpu_ref_put(&ctx->refs);
 }
 
 static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode,
@@ -5090,7 +5087,6 @@ static void io_async_task_func(struct callback_head *cb)
 
 	if (io_poll_rewait(req, &apoll->poll)) {
 		spin_unlock_irq(&ctx->completion_lock);
-		percpu_ref_put(&ctx->refs);
 		return;
 	}
 
@@ -5106,7 +5102,6 @@ static void io_async_task_func(struct callback_head *cb)
 	else
 		__io_req_task_cancel(req, -ECANCELED);
 
-	percpu_ref_put(&ctx->refs);
 	kfree(apoll->double_poll);
 	kfree(apoll);
 }
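
Net effect of the four hunks: __io_async_wake() stops taking a ctx
reference when it arms poll task_work, and io_poll_task_func() and
io_async_task_func() lose the matching percpu_ref_put() calls, including
the one on io_async_task_func()'s io_poll_rewait() early-return path.
Poll task_work thus runs entirely under the reference already held by
the task_work runner.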