io-wq: ensure all pending work is canceled on exit
If we race on shutting down the io-wq, then we should ensure that any
work queued after the workers have shut down is canceled. Harden the
add-work check a bit too, checking for IO_WQ_BIT_EXIT and canceling
the work if it is set.

Add a WARN_ON() to catch any work still pending before we kill the
io-wq context.

Reported-by: [email protected]
Signed-off-by: Jens Axboe <[email protected]>
axboe committed Mar 4, 2021
1 parent e4b4a13 commit f012725
Showing 1 changed file with 33 additions and 9 deletions.
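
A note for context (not part of this diff): when the enqueue path tags work
with IO_WQ_WORK_CANCEL and punts it straight to wq->do_work, the callback is
expected to fail the request rather than run it; on the io_uring side that
callback is io_wq_submit_work(). The sketch below is a condensed
approximation of that handler from this era, with simplified logic and
helper names that should be read as assumptions, not the verbatim
fs/io_uring.c source.

	/*
	 * Condensed sketch of the do_work side, heavily simplified; the
	 * helper names are approximations, not the exact kernel source.
	 */
	static void io_wq_submit_work(struct io_wq_work *work)
	{
		struct io_kiocb *req = container_of(work, struct io_kiocb, work);
		int ret = 0;

		/* work queued after (or racing with) exit: fail, don't execute */
		if (work->flags & IO_WQ_WORK_CANCEL)
			ret = -ECANCELED;

		if (!ret)
			ret = io_issue_sqe(req, 0);	/* normal issue path */

		if (ret)
			io_req_complete_failed(req, ret);
	}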
fs/io-wq.c
@@ -129,6 +129,17 @@ struct io_wq {
 
 static enum cpuhp_state io_wq_online;
 
+struct io_cb_cancel_data {
+	work_cancel_fn *fn;
+	void *data;
+	int nr_running;
+	int nr_pending;
+	bool cancel_all;
+};
+
+static void io_wqe_cancel_pending_work(struct io_wqe *wqe,
+				       struct io_cb_cancel_data *match);
+
 static bool io_worker_get(struct io_worker *worker)
 {
 	return refcount_inc_not_zero(&worker->ref);
@@ -713,6 +724,23 @@ static void io_wq_check_workers(struct io_wq *wq)
 	}
 }
 
+static bool io_wq_work_match_all(struct io_wq_work *work, void *data)
+{
+	return true;
+}
+
+static void io_wq_cancel_pending(struct io_wq *wq)
+{
+	struct io_cb_cancel_data match = {
+		.fn		= io_wq_work_match_all,
+		.cancel_all	= true,
+	};
+	int node;
+
+	for_each_node(node)
+		io_wqe_cancel_pending_work(wq->wqes[node], &match);
+}
+
 /*
  * Manager thread. Tasked with creating new workers, if we need them.
  */
@@ -748,6 +776,8 @@ static int io_wq_manager(void *data)
 	/* we might not ever have created any workers */
 	if (atomic_read(&wq->worker_refs))
 		wait_for_completion(&wq->worker_done);
+
+	io_wq_cancel_pending(wq);
 	complete(&wq->exited);
 	do_exit(0);
 }
@@ -809,7 +839,8 @@ static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
 	unsigned long flags;
 
 	/* Can only happen if manager creation fails after exec */
-	if (unlikely(io_wq_fork_manager(wqe->wq))) {
+	if (io_wq_fork_manager(wqe->wq) ||
+	    test_bit(IO_WQ_BIT_EXIT, &wqe->wq->state)) {
 		work->flags |= IO_WQ_WORK_CANCEL;
 		wqe->wq->do_work(work);
 		return;
@@ -845,14 +876,6 @@ void io_wq_hash_work(struct io_wq_work *work, void *val)
 	work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT));
 }
 
-struct io_cb_cancel_data {
-	work_cancel_fn *fn;
-	void *data;
-	int nr_running;
-	int nr_pending;
-	bool cancel_all;
-};
-
 static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
 {
 	struct io_cb_cancel_data *match = data;
@@ -1086,6 +1109,7 @@ static void io_wq_destroy(struct io_wq *wq)
 		struct io_wqe *wqe = wq->wqes[node];
 
 		list_del_init(&wqe->wait.entry);
+		WARN_ON_ONCE(!wq_list_empty(&wqe->work_list));
 		kfree(wqe);
 	}
 	spin_unlock_irq(&wq->hash->wait.lock);
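
A side note on the pattern: a match-all callback combined with cancel_all
drains every queued entry in a single pass, which is what allows the new
WARN_ON_ONCE() in io_wq_destroy() to assert an empty work list afterwards.
Below is a toy userspace analogue of that drain; every name in it is
invented for illustration and none of it is kernel code.

	/* Toy userspace analogue of the cancel-all drain; all names invented. */
	#include <stdbool.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct work {
		int id;
		struct work *next;
	};

	typedef bool (*work_match_fn)(struct work *w, void *data);

	static bool match_all(struct work *w, void *data)
	{
		(void)w; (void)data;
		return true;		/* mirrors io_wq_work_match_all() */
	}

	/* Walk the pending list, canceling every entry the callback matches. */
	static int cancel_pending(struct work **head, work_match_fn fn,
				  void *data, bool cancel_all)
	{
		int canceled = 0;

		while (*head) {
			struct work *w = *head;

			if (!fn(w, data)) {
				head = &w->next;
				continue;
			}
			*head = w->next;	/* unlink, then "cancel" */
			printf("canceled work %d\n", w->id);
			free(w);
			canceled++;
			if (!cancel_all)
				break;
		}
		return canceled;
	}

	int main(void)
	{
		struct work *head = NULL;

		for (int i = 0; i < 3; i++) {
			struct work *w = malloc(sizeof(*w));

			w->id = i;
			w->next = head;
			head = w;
		}
		/*
		 * On exit, drain everything so no work is leaked (cf. the
		 * WARN_ON_ONCE() the commit adds in io_wq_destroy()).
		 */
		cancel_pending(&head, match_all, NULL, true);
		return 0;
	}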
