Skip to content

Commit

Permalink
io_uring: do post-completion chore on t-out cancel
Browse files Browse the repository at this point in the history
Don't forget about io_commit_cqring() + io_cqring_ev_posted() after
cancelling timeouts on exit/exec. Both functions are declared only after
io_kill_timeouts(), so to avoid a pile of forward declarations, move
io_kill_timeouts() down past them.

Signed-off-by: Pavel Begunkov <[email protected]>
Link: https://lore.kernel.org/r/72ace588772c0f14834a6a4185d56c445a366fb4.1616696997.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <[email protected]>
  • Loading branch information
isilence authored and axboe committed Mar 27, 2021
1 parent 1ee4160 commit 80c4cbd
Showing 1 changed file with 22 additions and 20 deletions.
42 changes: 22 additions & 20 deletions fs/io_uring.c
Original file line number Diff line number Diff line change
Expand Up @@ -1262,26 +1262,6 @@ static void io_kill_timeout(struct io_kiocb *req, int status)
}
}

/*
 * Cancel all pending timeouts that match @tsk/@files with -ECANCELED.
 * Returns true if we found and killed one or more timeouts.
 *
 * Fix: the previous version dropped completion_lock without calling
 * io_commit_cqring(), and never called io_cqring_ev_posted(), so the
 * CQEs posted by io_kill_timeout() were neither flushed to the CQ ring
 * nor signalled to waiters.
 */
static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
			     struct files_struct *files)
{
	struct io_kiocb *req, *tmp;
	int canceled = 0;

	/* completion_lock guards both timeout_list and CQ ring updates */
	spin_lock_irq(&ctx->completion_lock);
	list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
		if (io_match_task(req, tsk, files)) {
			io_kill_timeout(req, -ECANCELED);
			canceled++;
		}
	}
	/* flush the CQEs posted above while still holding the lock */
	io_commit_cqring(ctx);
	spin_unlock_irq(&ctx->completion_lock);

	/* notify waiters only if we actually posted completions */
	if (canceled != 0)
		io_cqring_ev_posted(ctx);
	return canceled != 0;
}

static void __io_queue_deferred(struct io_ring_ctx *ctx)
{
do {
Expand Down Expand Up @@ -8611,6 +8591,28 @@ static void io_ring_exit_work(struct work_struct *work)
io_ring_ctx_free(ctx);
}

/*
 * Cancel every pending timeout matching @tsk/@files with -ECANCELED.
 * Returns true if we found and killed one or more timeouts.
 */
static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
struct files_struct *files)
{
struct io_kiocb *req, *tmp;
int canceled = 0;

/* completion_lock protects timeout_list and the CQ ring updates below */
spin_lock_irq(&ctx->completion_lock);
list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
if (io_match_task(req, tsk, files)) {
io_kill_timeout(req, -ECANCELED);
canceled++;
}
}
/* flush CQEs posted by io_kill_timeout() before dropping the lock */
io_commit_cqring(ctx);
spin_unlock_irq(&ctx->completion_lock);

/*
 * Signal completion-event consumers only if something was cancelled.
 * NOTE(review): io_cqring_ev_posted() is defined elsewhere in this
 * file — presumably it wakes CQ waiters/eventfd; confirm there.
 */
if (canceled != 0)
io_cqring_ev_posted(ctx);
return canceled != 0;
}

static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
{
unsigned long index;
Expand Down

0 comments on commit 80c4cbd

Please sign in to comment.