Skip to content

Commit

Permalink
io_uring: remove 'twa_signal_ok' deadlock work-around
Browse files · Browse the repository at this point in the history
The TIF_NOTIFY_SIGNAL based implementation of TWA_SIGNAL is always safe
to use, regardless of context, as we won't be recursing into the signal
lock. Now that all architectures are using that implementation, we can
drop this deadlock work-around, since TWA_SIGNAL is safe in every context.

Signed-off-by: Jens Axboe <[email protected]>
  • Loading branch information
axboe committed Dec 12, 2020
1 parent e296dc4 commit 355fb9e
Showing 1 changed file with 6 additions and 15 deletions.
21 changes: 6 additions & 15 deletions fs/io_uring.c
Original file line number Diff line number Diff line change
Expand Up @@ -1995,7 +1995,7 @@ static struct io_kiocb *io_req_find_next(struct io_kiocb *req)
return __io_req_find_next(req);
}

static int io_req_task_work_add(struct io_kiocb *req, bool twa_signal_ok)
static int io_req_task_work_add(struct io_kiocb *req)
{
struct task_struct *tsk = req->task;
struct io_ring_ctx *ctx = req->ctx;
Expand All @@ -2012,7 +2012,7 @@ static int io_req_task_work_add(struct io_kiocb *req, bool twa_signal_ok)
* will do the job.
*/
notify = TWA_NONE;
if (!(ctx->flags & IORING_SETUP_SQPOLL) && twa_signal_ok)
if (!(ctx->flags & IORING_SETUP_SQPOLL))
notify = TWA_SIGNAL;

ret = task_work_add(tsk, &req->task_work, notify);
Expand Down Expand Up @@ -2074,7 +2074,7 @@ static void io_req_task_queue(struct io_kiocb *req)
init_task_work(&req->task_work, io_req_task_submit);
percpu_ref_get(&req->ctx->refs);

ret = io_req_task_work_add(req, true);
ret = io_req_task_work_add(req);
if (unlikely(ret)) {
struct task_struct *tsk;

Expand Down Expand Up @@ -2196,7 +2196,7 @@ static void io_free_req_deferred(struct io_kiocb *req)
int ret;

init_task_work(&req->task_work, io_put_req_deferred_cb);
ret = io_req_task_work_add(req, true);
ret = io_req_task_work_add(req);
if (unlikely(ret)) {
struct task_struct *tsk;

Expand Down Expand Up @@ -3305,7 +3305,7 @@ static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,

/* submit ref gets dropped, acquire a new one */
refcount_inc(&req->refs);
ret = io_req_task_work_add(req, true);
ret = io_req_task_work_add(req);
if (unlikely(ret)) {
struct task_struct *tsk;

Expand Down Expand Up @@ -4843,7 +4843,6 @@ struct io_poll_table {
static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
__poll_t mask, task_work_func_t func)
{
bool twa_signal_ok;
int ret;

/* for instances that support it check for an event match first: */
Expand All @@ -4858,21 +4857,13 @@ static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
init_task_work(&req->task_work, func);
percpu_ref_get(&req->ctx->refs);

/*
* If we using the signalfd wait_queue_head for this wakeup, then
* it's not safe to use TWA_SIGNAL as we could be recursing on the
* tsk->sighand->siglock on doing the wakeup. Should not be needed
* either, as the normal wakeup will suffice.
*/
twa_signal_ok = (poll->head != &req->task->sighand->signalfd_wqh);

/*
* If this fails, then the task is exiting. When a task exits, the
* work gets canceled, so just cancel this request as well instead
* of executing it. We can't safely execute it anyway, as we may not
* have the needed state needed for it anyway.
*/
ret = io_req_task_work_add(req, twa_signal_ok);
ret = io_req_task_work_add(req);
if (unlikely(ret)) {
struct task_struct *tsk;

Expand Down

0 comments on commit 355fb9e

Please sign in to comment.