Merge tag 'io_uring-5.14-2021-07-30' of git://git.kernel.dk/linux-block
Pull io_uring fixes from Jens Axboe:

 - A fix for block-backed reissue (me)

 - Reissue context hardening (me)

 - Async link locking fix (Pavel)

* tag 'io_uring-5.14-2021-07-30' of git://git.kernel.dk/linux-block:
  io_uring: fix poll requests leaking second poll entries
  io_uring: don't block level reissue off completion path
  io_uring: always reissue from task_work context
  io_uring: fix race in unified task_work running
  io_uring: fix io_prep_async_link locking
torvalds committed Jul 30, 2021
commit 27eb687 (2 parents: f6c5971 + a890d01)
fs/io_uring.c: 40 changes (32 additions, 8 deletions)
@@ -1279,8 +1279,17 @@ static void io_prep_async_link(struct io_kiocb *req)
 {
         struct io_kiocb *cur;
 
-        io_for_each_link(cur, req)
-                io_prep_async_work(cur);
+        if (req->flags & REQ_F_LINK_TIMEOUT) {
+                struct io_ring_ctx *ctx = req->ctx;
+
+                spin_lock_irq(&ctx->completion_lock);
+                io_for_each_link(cur, req)
+                        io_prep_async_work(cur);
+                spin_unlock_irq(&ctx->completion_lock);
+        } else {
+                io_for_each_link(cur, req)
+                        io_prep_async_work(cur);
+        }
 }
 
 static void io_queue_async_work(struct io_kiocb *req)
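
The io_prep_async_link() hunk above serializes the link-list walk against a
concurrently firing linked timeout, which can unlink requests from the chain
under ctx->completion_lock. A minimal userspace sketch of the same pattern
follows, with hypothetical names (prep_chain, prep_one, list_lock); it is an
illustration of the locking rule, not kernel code:

#include <pthread.h>
#include <stddef.h>

struct node {
        struct node *next;
        int prepped;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void prep_one(struct node *n)
{
        n->prepped = 1;
}

/*
 * Analogue of the fixed io_prep_async_link(): when a concurrent
 * modifier can run (the linked timeout in the real code), walk the
 * list under the same lock the modifier takes; otherwise stay
 * lock-free on the common path.
 */
static void prep_chain(struct node *head, int timeout_armed)
{
        struct node *cur;

        if (timeout_armed) {
                pthread_mutex_lock(&list_lock);
                for (cur = head; cur != NULL; cur = cur->next)
                        prep_one(cur);
                pthread_mutex_unlock(&list_lock);
        } else {
                for (cur = head; cur != NULL; cur = cur->next)
                        prep_one(cur);
        }
}
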
@@ -1950,9 +1959,13 @@ static void tctx_task_work(struct callback_head *cb)
                         node = next;
                 }
                 if (wq_list_empty(&tctx->task_list)) {
+                        spin_lock_irq(&tctx->task_lock);
                         clear_bit(0, &tctx->task_state);
-                        if (wq_list_empty(&tctx->task_list))
+                        if (wq_list_empty(&tctx->task_list)) {
+                                spin_unlock_irq(&tctx->task_lock);
                                 break;
+                        }
+                        spin_unlock_irq(&tctx->task_lock);
                         /* another tctx_task_work() is enqueued, yield */
                         if (test_and_set_bit(0, &tctx->task_state))
                                 break;
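
The tctx_task_work() hunk closes a lost-work race: a producer queues an item
and then tests the task_state bit, while the runner clears the bit and checks
the list. If the runner's clear-and-check is not atomic with respect to the
producer's list insertion, an item queued in that window can be stranded with
no runner. A userspace sketch of the fixed protocol, assuming hypothetical
names (q_lock, q_len, runner_active):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;
static int q_len;                  /* guarded by q_lock */
static atomic_bool runner_active;  /* analogue of bit 0 of tctx->task_state */

/* Producer: queue one item; returns true if the caller must start a runner. */
static bool enqueue_needs_runner(void)
{
        pthread_mutex_lock(&q_lock);
        q_len++;
        pthread_mutex_unlock(&q_lock);
        return !atomic_exchange(&runner_active, true);
}

/*
 * Runner loop tail, mirroring the fixed tctx_task_work(): clear the
 * flag and re-check emptiness under the producer's lock, so an item
 * queued between the clear and the check cannot be missed.
 */
static bool runner_should_exit(void)
{
        pthread_mutex_lock(&q_lock);
        atomic_store(&runner_active, false);
        if (q_len == 0) {
                pthread_mutex_unlock(&q_lock);
                return true;    /* really idle: safe to exit */
        }
        pthread_mutex_unlock(&q_lock);
        /* work arrived meanwhile; resume unless another runner was started */
        return atomic_exchange(&runner_active, true);
}
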
@@ -2047,6 +2060,12 @@ static void io_req_task_queue(struct io_kiocb *req)
         io_req_task_work_add(req);
 }
 
+static void io_req_task_queue_reissue(struct io_kiocb *req)
+{
+        req->io_task_work.func = io_queue_async_work;
+        io_req_task_work_add(req);
+}
+
 static inline void io_queue_next(struct io_kiocb *req)
 {
         struct io_kiocb *nxt = io_req_find_next(req);
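
io_req_task_queue_reissue(), added above, is the core of the reissue-context
hardening: completion paths may run in IRQ or io-wq worker context, where
re-importing buffers and requeueing is unsafe, so the punt to the async queue
is itself bounced through task_work and executes in the submitting task's
context. A compilable stub sketch of the indirection, with hypothetical names
(struct request, task_work_add_stub, queue_async_work):

/* Hypothetical request type, for illustration only. */
struct request {
        void (*task_work_fn)(struct request *req);
};

/* Stand-in for io_req_task_work_add(): the real code queues the
 * request to the submitting task, which later invokes task_work_fn. */
static void task_work_add_stub(struct request *req)
{
        (void)req;
}

/* Stand-in for io_queue_async_work(): the real code punts the
 * request to the io-wq worker pool. */
static void queue_async_work(struct request *req)
{
        (void)req;
}

/*
 * Analogue of io_req_task_queue_reissue(): rather than calling
 * queue_async_work() directly from a possibly atomic completion
 * path, record it as the task_work callback so it runs in safe
 * task context.
 */
static void task_queue_reissue(struct request *req)
{
        req->task_work_fn = queue_async_work;
        task_work_add_stub(req);
}
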
@@ -2235,7 +2254,7 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
                     !(req->flags & REQ_F_DONT_REISSUE)) {
                         req->iopoll_completed = 0;
                         req_ref_get(req);
-                        io_queue_async_work(req);
+                        io_req_task_queue_reissue(req);
                         continue;
                 }
 
@@ -2428,6 +2447,12 @@ static bool io_rw_should_reissue(struct io_kiocb *req)
          */
         if (percpu_ref_is_dying(&ctx->refs))
                 return false;
+        /*
+         * Play it safe and assume not safe to re-import and reissue if we're
+         * not in the original thread group (or in task context).
+         */
+        if (!same_thread_group(req->task, current) || !in_task())
+                return false;
         return true;
 }
 #else
@@ -2758,7 +2783,7 @@ static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
                 req->flags &= ~REQ_F_REISSUE;
                 if (io_resubmit_prep(req)) {
                         req_ref_get(req);
-                        io_queue_async_work(req);
+                        io_req_task_queue_reissue(req);
                 } else {
                         int cflags = 0;
 
@@ -4914,7 +4939,6 @@ static bool io_poll_complete(struct io_kiocb *req, __poll_t mask)
         if (req->poll.events & EPOLLONESHOT)
                 flags = 0;
         if (!io_cqring_fill_event(ctx, req->user_data, error, flags)) {
-                io_poll_remove_waitqs(req);
                 req->poll.done = true;
                 flags = 0;
         }
@@ -4937,6 +4961,7 @@ static void io_poll_task_func(struct io_kiocb *req)
 
         done = io_poll_complete(req, req->result);
         if (done) {
+                io_poll_remove_double(req);
                 hash_del(&req->hash_node);
         } else {
                 req->result = 0;
@@ -5124,7 +5149,7 @@ static __poll_t __io_arm_poll_handler(struct io_kiocb *req,
                 ipt->error = -EINVAL;
 
         spin_lock_irq(&ctx->completion_lock);
-        if (ipt->error)
+        if (ipt->error || (mask && (poll->events & EPOLLONESHOT)))
                 io_poll_remove_double(req);
         if (likely(poll->head)) {
                 spin_lock(&poll->head->lock);
@@ -5196,7 +5221,6 @@ static int io_arm_poll_handler(struct io_kiocb *req)
         ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask,
                                         io_async_wake);
         if (ret || ipt.error) {
-                io_poll_remove_double(req);
                 spin_unlock_irq(&ctx->completion_lock);
                 if (ret)
                         return IO_APOLL_READY;
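
Taken together, the four poll hunks move teardown of the second (double) poll
entry to the one point every completed request passes through:
io_poll_task_func() now drops it when the request is done, the earlier drop
sites in io_poll_complete() and io_arm_poll_handler() go away, and
__io_arm_poll_handler() drops it when an armed oneshot poll already has a hit.
The underlying rule, as a small sketch with a hypothetical struct poll_req:

#include <stdlib.h>

/* Hypothetical request that may own a second, auxiliary entry. */
struct poll_req {
        void *second_entry;   /* analogue of the double poll wait entry */
};

/*
 * Release an auxiliary resource at the single completion choke point
 * rather than from scattered early-exit sites; missing one of those
 * sites is how the second poll entries leaked.
 */
static void poll_req_complete(struct poll_req *req)
{
        free(req->second_entry);   /* exactly once, when done */
        req->second_entry = NULL;
}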