Merge tag 'io_uring-5.8-2020-06-19' of git://git.kernel.dk/linux-block
Pull io_uring fixes from Jens Axboe:

 - Catch a case where io_sq_thread() didn't do proper mm acquire

 - Ensure poll completions are reaped on shutdown

 - Async cancelation and run fixes (Pavel)

 - io-poll race fixes (Xiaoguang)

 - Request cleanup race fix (Xiaoguang)

* tag 'io_uring-5.8-2020-06-19' of git://git.kernel.dk/linux-block:
  io_uring: fix possible race condition against REQ_F_NEED_CLEANUP
  io_uring: reap poll completions while waiting for refs to drop on exit
  io_uring: acquire 'mm' for task_work for SQPOLL
  io_uring: add memory barrier to synchronize io_kiocb's result and iopoll_completed
  io_uring: don't fail links for EAGAIN error in IOPOLL mode
  io_uring: cancel by ->task not pid
  io_uring: lazy get task
  io_uring: batch cancel in io_uring_cancel_files()
  io_uring: cancel all task's requests on exit
  io-wq: add an option to cancel all matched reqs
  io-wq: reorder cancellation pending -> running
  io_uring: fix lazy work init
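The "add memory barrier to synchronize io_kiocb's result and iopoll_completed" entry above addresses a classic publish/consume ordering problem. As a minimal sketch only (the struct, field, and function names below are simplified placeholders, not the actual io_uring code), the pattern pairs an smp_wmb() on the side that stores the result and then sets the completed flag with an smp_rmb() on the side that polls the flag and then reads the result:

	/* Illustrative only: simplified stand-ins for a request's result and
	 * iopoll_completed fields; assumes kernel barrier/compiler helpers. */
	struct my_iopoll_req {
		long	result;		/* written by the completion side */
		int	completed;	/* polled by the reaping side */
	};

	static void my_complete(struct my_iopoll_req *req, long res)
	{
		WRITE_ONCE(req->result, res);
		/* order the result store before publishing the completed flag */
		smp_wmb();
		WRITE_ONCE(req->completed, 1);
	}

	static long my_reap(struct my_iopoll_req *req)
	{
		if (!READ_ONCE(req->completed))
			return -EAGAIN;
		/* pairs with smp_wmb() in my_complete(): seeing completed == 1
		 * guarantees the result store is visible too */
		smp_rmb();
		return READ_ONCE(req->result);
	}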
torvalds committed Jun 19, 2020
2 parents d2b1c81 + 6f2cc16 commit 4333a9b
Showing 3 changed files with 177 additions and 112 deletions.
108 changes: 56 additions & 52 deletions fs/io-wq.c
@@ -903,13 +903,15 @@ void io_wq_cancel_all(struct io_wq *wq)
 struct io_cb_cancel_data {
 	work_cancel_fn *fn;
 	void *data;
+	int nr_running;
+	int nr_pending;
+	bool cancel_all;
 };
 
 static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
 {
 	struct io_cb_cancel_data *match = data;
 	unsigned long flags;
-	bool ret = false;
 
 	/*
 	 * Hold the lock to avoid ->cur_work going out of scope, caller
@@ -920,74 +922,90 @@ static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
 	    !(worker->cur_work->flags & IO_WQ_WORK_NO_CANCEL) &&
 	    match->fn(worker->cur_work, match->data)) {
 		send_sig(SIGINT, worker->task, 1);
-		ret = true;
+		match->nr_running++;
 	}
 	spin_unlock_irqrestore(&worker->lock, flags);
 
-	return ret;
+	return match->nr_running && !match->cancel_all;
 }
 
-static enum io_wq_cancel io_wqe_cancel_work(struct io_wqe *wqe,
-					    struct io_cb_cancel_data *match)
+static void io_wqe_cancel_pending_work(struct io_wqe *wqe,
+				       struct io_cb_cancel_data *match)
 {
 	struct io_wq_work_node *node, *prev;
 	struct io_wq_work *work;
 	unsigned long flags;
-	bool found = false;
 
-	/*
-	 * First check pending list, if we're lucky we can just remove it
-	 * from there. CANCEL_OK means that the work is returned as-new,
-	 * no completion will be posted for it.
-	 */
+retry:
 	spin_lock_irqsave(&wqe->lock, flags);
 	wq_list_for_each(node, prev, &wqe->work_list) {
 		work = container_of(node, struct io_wq_work, list);
+		if (!match->fn(work, match->data))
+			continue;
 
-		if (match->fn(work, match->data)) {
-			wq_list_del(&wqe->work_list, node, prev);
-			found = true;
-			break;
-		}
-	}
-	spin_unlock_irqrestore(&wqe->lock, flags);
-
-	if (found) {
+		wq_list_del(&wqe->work_list, node, prev);
+		spin_unlock_irqrestore(&wqe->lock, flags);
 		io_run_cancel(work, wqe);
-		return IO_WQ_CANCEL_OK;
+		match->nr_pending++;
+		if (!match->cancel_all)
+			return;
+
+		/* not safe to continue after unlock */
+		goto retry;
 	}
+	spin_unlock_irqrestore(&wqe->lock, flags);
+}
 
-	/*
-	 * Now check if a free (going busy) or busy worker has the work
-	 * currently running. If we find it there, we'll return CANCEL_RUNNING
-	 * as an indication that we attempt to signal cancellation. The
-	 * completion will run normally in this case.
-	 */
+static void io_wqe_cancel_running_work(struct io_wqe *wqe,
+				       struct io_cb_cancel_data *match)
+{
 	rcu_read_lock();
-	found = io_wq_for_each_worker(wqe, io_wq_worker_cancel, match);
+	io_wq_for_each_worker(wqe, io_wq_worker_cancel, match);
 	rcu_read_unlock();
-	return found ? IO_WQ_CANCEL_RUNNING : IO_WQ_CANCEL_NOTFOUND;
 }
 
 enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
-				  void *data)
+				  void *data, bool cancel_all)
 {
 	struct io_cb_cancel_data match = {
-		.fn	= cancel,
-		.data	= data,
+		.fn		= cancel,
+		.data		= data,
+		.cancel_all	= cancel_all,
 	};
-	enum io_wq_cancel ret = IO_WQ_CANCEL_NOTFOUND;
 	int node;
 
+	/*
+	 * First check pending list, if we're lucky we can just remove it
+	 * from there. CANCEL_OK means that the work is returned as-new,
+	 * no completion will be posted for it.
+	 */
 	for_each_node(node) {
 		struct io_wqe *wqe = wq->wqes[node];
 
-		ret = io_wqe_cancel_work(wqe, &match);
-		if (ret != IO_WQ_CANCEL_NOTFOUND)
-			break;
+		io_wqe_cancel_pending_work(wqe, &match);
+		if (match.nr_pending && !match.cancel_all)
+			return IO_WQ_CANCEL_OK;
 	}
 
-	return ret;
+	/*
+	 * Now check if a free (going busy) or busy worker has the work
+	 * currently running. If we find it there, we'll return CANCEL_RUNNING
+	 * as an indication that we attempt to signal cancellation. The
+	 * completion will run normally in this case.
+	 */
+	for_each_node(node) {
+		struct io_wqe *wqe = wq->wqes[node];
+
+		io_wqe_cancel_running_work(wqe, &match);
+		if (match.nr_running && !match.cancel_all)
+			return IO_WQ_CANCEL_RUNNING;
+	}
+
+	if (match.nr_running)
+		return IO_WQ_CANCEL_RUNNING;
+	if (match.nr_pending)
+		return IO_WQ_CANCEL_OK;
+	return IO_WQ_CANCEL_NOTFOUND;
 }
 
 static bool io_wq_io_cb_cancel_data(struct io_wq_work *work, void *data)
@@ -997,21 +1015,7 @@ static bool io_wq_io_cb_cancel_data(struct io_wq_work *work, void *data)
 
 enum io_wq_cancel io_wq_cancel_work(struct io_wq *wq, struct io_wq_work *cwork)
 {
-	return io_wq_cancel_cb(wq, io_wq_io_cb_cancel_data, (void *)cwork);
-}
-
-static bool io_wq_pid_match(struct io_wq_work *work, void *data)
-{
-	pid_t pid = (pid_t) (unsigned long) data;
-
-	return work->task_pid == pid;
-}
-
-enum io_wq_cancel io_wq_cancel_pid(struct io_wq *wq, pid_t pid)
-{
-	void *data = (void *) (unsigned long) pid;
-
-	return io_wq_cancel_cb(wq, io_wq_pid_match, data);
+	return io_wq_cancel_cb(wq, io_wq_io_cb_cancel_data, (void *)cwork, false);
 }
 
 struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
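The fs/io-wq.c changes above split cancellation into two passes (pending list first, then running workers) and let a caller ask for every matching work item rather than only the first one. A minimal caller sketch, assuming a request type that embeds struct io_wq_work; the my_* names are illustrative, not the fs/io_uring.c code from this merge (that file's diff is not shown on this page):

	/* assumes "io-wq.h" and <linux/sched.h> are in scope */

	/* Hypothetical request type embedding io_wq_work, for illustration only */
	struct my_req {
		struct io_wq_work	work;
		struct task_struct	*task;
	};

	/* work_cancel_fn: true if this work belongs to the task being torn down */
	static bool my_req_task_match(struct io_wq_work *work, void *data)
	{
		struct my_req *req = container_of(work, struct my_req, work);

		return req->task == data;
	}

	/* Batch-cancel every pending or running work owned by @tsk */
	static enum io_wq_cancel my_cancel_task_requests(struct io_wq *wq,
							 struct task_struct *tsk)
	{
		/* cancel_all == true: don't stop at the first match */
		return io_wq_cancel_cb(wq, my_req_task_match, tsk, true);
	}

With cancel_all set, io_wq_cancel_cb() keeps scanning after a hit and the return value only summarizes what was found; with it clear, the old first-match semantics are preserved.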
4 changes: 1 addition & 3 deletions fs/io-wq.h
@@ -90,7 +90,6 @@ struct io_wq_work {
 	const struct cred *creds;
 	struct fs_struct *fs;
 	unsigned flags;
-	pid_t task_pid;
 };
 
 static inline struct io_wq_work *wq_next_work(struct io_wq_work *work)
@@ -125,12 +124,11 @@ static inline bool io_wq_is_hashed(struct io_wq_work *work)
 
 void io_wq_cancel_all(struct io_wq *wq);
 enum io_wq_cancel io_wq_cancel_work(struct io_wq *wq, struct io_wq_work *cwork);
-enum io_wq_cancel io_wq_cancel_pid(struct io_wq *wq, pid_t pid);
 
 typedef bool (work_cancel_fn)(struct io_wq_work *, void *);
 
 enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
-				  void *data);
+				  void *data, bool cancel_all);
 
 struct task_struct *io_wq_get_task(struct io_wq *wq);
 
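For completeness, the return-value semantics spelled out in the fs/io-wq.c comments (IO_WQ_CANCEL_OK: the work was still pending and is returned as-new, no completion posted; IO_WQ_CANCEL_RUNNING: the running worker was signalled and the normal completion still runs; IO_WQ_CANCEL_NOTFOUND: nothing matched) suggest the following caller-side handling. This is an illustrative sketch with an arbitrary helper name and errno mapping, not code from this merge:

	/* assumes "io-wq.h" and <linux/errno.h> are in scope */
	static int my_try_cancel_one(struct io_wq *wq, struct io_wq_work *work)
	{
		switch (io_wq_cancel_work(wq, work)) {
		case IO_WQ_CANCEL_OK:
			/* still pending: pulled from the list, no completion posted */
			return 0;
		case IO_WQ_CANCEL_RUNNING:
			/* worker signalled: the normal completion will still run */
			return -EALREADY;
		case IO_WQ_CANCEL_NOTFOUND:
		default:
			return -ENOENT;
		}
	}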
