Skip to content

Commit

Permalink
blk-ioc: protect ioc_destroy_icq() by 'queue_lock'
Browse files Browse the repository at this point in the history
Currently, an icq is tracked by both the request_queue (icq->q_node) and
the task (icq->ioc_node), and ioc_clear_queue() from elevator exit is not
safe because it can access the list without protection:

ioc_clear_queue			ioc_release_fn
 lock queue_lock
 list_splice
 /* move queue list to a local list */
 unlock queue_lock
 /*
  * lock is released, the local list
  * can be accessed through task exit.
  */

				lock ioc->lock
				while (!hlist_empty)
				 icq = hlist_entry
				 lock queue_lock
				  ioc_destroy_icq
				   delete icq->ioc_node
 while (!list_empty)
  icq = list_entry()		   list_del icq->q_node
  /*
   * This is not protected by any lock,
   * list_entry concurrent with list_del
   * is not safe.
   */

				 unlock queue_lock
				unlock ioc->lock

Fix this problem by protecting the 'icq->q_node' list with queue_lock in
ioc_clear_queue().

Reported-and-tested-by: Pradeep Pragallapati <[email protected]>
Link: https://lore.kernel.org/lkml/[email protected]/
Signed-off-by: Yu Kuai <[email protected]>
Reviewed-by: Christoph Hellwig <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Jens Axboe <[email protected]>
  • Loading branch information
YuKuai-huawei authored and axboe committed Jun 1, 2023
1 parent 6c50000 commit 5a0ac57
Showing 1 changed file with 13 additions and 17 deletions.
30 changes: 13 additions & 17 deletions block/blk-ioc.c
Original file line number Diff line number Diff line change
Expand Up @@ -77,6 +77,10 @@ static void ioc_destroy_icq(struct io_cq *icq)
struct elevator_type *et = q->elevator->type;

lockdep_assert_held(&ioc->lock);
lockdep_assert_held(&q->queue_lock);

if (icq->flags & ICQ_DESTROYED)
return;

radix_tree_delete(&ioc->icq_tree, icq->q->id);
hlist_del_init(&icq->ioc_node);
Expand Down Expand Up @@ -128,12 +132,7 @@ static void ioc_release_fn(struct work_struct *work)
spin_lock(&q->queue_lock);
spin_lock(&ioc->lock);

/*
* The icq may have been destroyed when the ioc lock
* was released.
*/
if (!(icq->flags & ICQ_DESTROYED))
ioc_destroy_icq(icq);
ioc_destroy_icq(icq);

spin_unlock(&q->queue_lock);
rcu_read_unlock();
Expand Down Expand Up @@ -171,23 +170,20 @@ static bool ioc_delay_free(struct io_context *ioc)
*/
void ioc_clear_queue(struct request_queue *q)
{
LIST_HEAD(icq_list);

spin_lock_irq(&q->queue_lock);
list_splice_init(&q->icq_list, &icq_list);
spin_unlock_irq(&q->queue_lock);

rcu_read_lock();
while (!list_empty(&icq_list)) {
while (!list_empty(&q->icq_list)) {
struct io_cq *icq =
list_entry(icq_list.next, struct io_cq, q_node);
list_first_entry(&q->icq_list, struct io_cq, q_node);

/*
* Other context won't hold ioc lock to wait for queue_lock, see
* details in ioc_release_fn().
*/
spin_lock_irq(&icq->ioc->lock);
if (!(icq->flags & ICQ_DESTROYED))
ioc_destroy_icq(icq);
ioc_destroy_icq(icq);
spin_unlock_irq(&icq->ioc->lock);
}
rcu_read_unlock();
spin_unlock_irq(&q->queue_lock);
}
#else /* CONFIG_BLK_ICQ */
static inline void ioc_exit_icqs(struct io_context *ioc)
Expand Down

0 comments on commit 5a0ac57

Please sign in to comment.