Skip to content

Commit

Permalink
block: remove the queue_lock indirection
Browse files Browse the repository at this point in the history
With the legacy request path gone there is no good reason to keep
queue_lock as a pointer, we can always use the embedded lock now.

Reviewed-by: Hannes Reinecke <[email protected]>
Signed-off-by: Christoph Hellwig <[email protected]>

Fixed missing conversions and half-done edits in floppy and blk-cgroup.

Signed-off-by: Jens Axboe <[email protected]>
  • Loading branch information
Christoph Hellwig authored and axboe committed Nov 15, 2018
1 parent 6d46964 commit 0d945c1
Show file tree
Hide file tree
Showing 16 changed files with 92 additions and 106 deletions.
2 changes: 1 addition & 1 deletion block/bfq-cgroup.c
Original file line number Diff line number Diff line change
Expand Up @@ -334,7 +334,7 @@ static void bfqg_stats_xfer_dead(struct bfq_group *bfqg)

parent = bfqg_parent(bfqg);

lockdep_assert_held(bfqg_to_blkg(bfqg)->q->queue_lock);
lockdep_assert_held(&bfqg_to_blkg(bfqg)->q->queue_lock);

if (unlikely(!parent))
return;
Expand Down
16 changes: 8 additions & 8 deletions block/bfq-iosched.c
Original file line number Diff line number Diff line change
Expand Up @@ -399,9 +399,9 @@ static struct bfq_io_cq *bfq_bic_lookup(struct bfq_data *bfqd,
unsigned long flags;
struct bfq_io_cq *icq;

spin_lock_irqsave(q->queue_lock, flags);
spin_lock_irqsave(&q->queue_lock, flags);
icq = icq_to_bic(ioc_lookup_icq(ioc, q));
spin_unlock_irqrestore(q->queue_lock, flags);
spin_unlock_irqrestore(&q->queue_lock, flags);

return icq;
}
Expand Down Expand Up @@ -4034,7 +4034,7 @@ static void bfq_update_dispatch_stats(struct request_queue *q,
* In addition, the following queue lock guarantees that
* bfqq_group(bfqq) exists as well.
*/
spin_lock_irq(q->queue_lock);
spin_lock_irq(&q->queue_lock);
if (idle_timer_disabled)
/*
* Since the idle timer has been disabled,
Expand All @@ -4053,7 +4053,7 @@ static void bfq_update_dispatch_stats(struct request_queue *q,
bfqg_stats_set_start_empty_time(bfqg);
bfqg_stats_update_io_remove(bfqg, rq->cmd_flags);
}
spin_unlock_irq(q->queue_lock);
spin_unlock_irq(&q->queue_lock);
}
#else
static inline void bfq_update_dispatch_stats(struct request_queue *q,
Expand Down Expand Up @@ -4637,11 +4637,11 @@ static void bfq_update_insert_stats(struct request_queue *q,
* In addition, the following queue lock guarantees that
* bfqq_group(bfqq) exists as well.
*/
spin_lock_irq(q->queue_lock);
spin_lock_irq(&q->queue_lock);
bfqg_stats_update_io_add(bfqq_group(bfqq), bfqq, cmd_flags);
if (idle_timer_disabled)
bfqg_stats_update_idle_time(bfqq_group(bfqq));
spin_unlock_irq(q->queue_lock);
spin_unlock_irq(&q->queue_lock);
}
#else
static inline void bfq_update_insert_stats(struct request_queue *q,
Expand Down Expand Up @@ -5382,9 +5382,9 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
}
eq->elevator_data = bfqd;

spin_lock_irq(q->queue_lock);
spin_lock_irq(&q->queue_lock);
q->elevator = eq;
spin_unlock_irq(q->queue_lock);
spin_unlock_irq(&q->queue_lock);

/*
* Our fallback bfqq if bfq_find_alloc_queue() runs into OOM issues.
Expand Down
62 changes: 31 additions & 31 deletions block/blk-cgroup.c
Original file line number Diff line number Diff line change
Expand Up @@ -147,7 +147,7 @@ struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
if (blkg && blkg->q == q) {
if (update_hint) {
lockdep_assert_held(q->queue_lock);
lockdep_assert_held(&q->queue_lock);
rcu_assign_pointer(blkcg->blkg_hint, blkg);
}
return blkg;
Expand All @@ -170,7 +170,7 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
int i, ret;

WARN_ON_ONCE(!rcu_read_lock_held());
lockdep_assert_held(q->queue_lock);
lockdep_assert_held(&q->queue_lock);

/* blkg holds a reference to blkcg */
if (!css_tryget_online(&blkcg->css)) {
Expand Down Expand Up @@ -268,7 +268,7 @@ struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
struct blkcg_gq *blkg;

WARN_ON_ONCE(!rcu_read_lock_held());
lockdep_assert_held(q->queue_lock);
lockdep_assert_held(&q->queue_lock);

blkg = __blkg_lookup(blkcg, q, true);
if (blkg)
Expand Down Expand Up @@ -299,7 +299,7 @@ static void blkg_destroy(struct blkcg_gq *blkg)
struct blkcg_gq *parent = blkg->parent;
int i;

lockdep_assert_held(blkg->q->queue_lock);
lockdep_assert_held(&blkg->q->queue_lock);
lockdep_assert_held(&blkcg->lock);

/* Something wrong if we are trying to remove same group twice */
Expand Down Expand Up @@ -349,7 +349,7 @@ static void blkg_destroy_all(struct request_queue *q)
{
struct blkcg_gq *blkg, *n;

spin_lock_irq(q->queue_lock);
spin_lock_irq(&q->queue_lock);
list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
struct blkcg *blkcg = blkg->blkcg;

Expand All @@ -359,7 +359,7 @@ static void blkg_destroy_all(struct request_queue *q)
}

q->root_blkg = NULL;
spin_unlock_irq(q->queue_lock);
spin_unlock_irq(&q->queue_lock);
}

/*
Expand Down Expand Up @@ -454,10 +454,10 @@ void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,

rcu_read_lock();
hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
spin_lock_irq(blkg->q->queue_lock);
spin_lock_irq(&blkg->q->queue_lock);
if (blkcg_policy_enabled(blkg->q, pol))
total += prfill(sf, blkg->pd[pol->plid], data);
spin_unlock_irq(blkg->q->queue_lock);
spin_unlock_irq(&blkg->q->queue_lock);
}
rcu_read_unlock();

Expand Down Expand Up @@ -655,7 +655,7 @@ u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg,
struct cgroup_subsys_state *pos_css;
u64 sum = 0;

lockdep_assert_held(blkg->q->queue_lock);
lockdep_assert_held(&blkg->q->queue_lock);

rcu_read_lock();
blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
Expand Down Expand Up @@ -698,7 +698,7 @@ struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg,
struct blkg_rwstat sum = { };
int i;

lockdep_assert_held(blkg->q->queue_lock);
lockdep_assert_held(&blkg->q->queue_lock);

rcu_read_lock();
blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
Expand Down Expand Up @@ -729,7 +729,7 @@ static struct blkcg_gq *blkg_lookup_check(struct blkcg *blkcg,
struct request_queue *q)
{
WARN_ON_ONCE(!rcu_read_lock_held());
lockdep_assert_held(q->queue_lock);
lockdep_assert_held(&q->queue_lock);

if (!blkcg_policy_enabled(q, pol))
return ERR_PTR(-EOPNOTSUPP);
Expand All @@ -750,7 +750,7 @@ static struct blkcg_gq *blkg_lookup_check(struct blkcg *blkcg,
*/
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
char *input, struct blkg_conf_ctx *ctx)
__acquires(rcu) __acquires(disk->queue->queue_lock)
__acquires(rcu) __acquires(&disk->queue->queue_lock)
{
struct gendisk *disk;
struct request_queue *q;
Expand Down Expand Up @@ -778,7 +778,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
q = disk->queue;

rcu_read_lock();
spin_lock_irq(q->queue_lock);
spin_lock_irq(&q->queue_lock);

blkg = blkg_lookup_check(blkcg, pol, q);
if (IS_ERR(blkg)) {
Expand All @@ -805,7 +805,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
}

/* Drop locks to do new blkg allocation with GFP_KERNEL. */
spin_unlock_irq(q->queue_lock);
spin_unlock_irq(&q->queue_lock);
rcu_read_unlock();

new_blkg = blkg_alloc(pos, q, GFP_KERNEL);
Expand All @@ -815,7 +815,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
}

rcu_read_lock();
spin_lock_irq(q->queue_lock);
spin_lock_irq(&q->queue_lock);

blkg = blkg_lookup_check(pos, pol, q);
if (IS_ERR(blkg)) {
Expand Down Expand Up @@ -843,7 +843,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
return 0;

fail_unlock:
spin_unlock_irq(q->queue_lock);
spin_unlock_irq(&q->queue_lock);
rcu_read_unlock();
fail:
put_disk_and_module(disk);
Expand All @@ -868,9 +868,9 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
* with blkg_conf_prep().
*/
void blkg_conf_finish(struct blkg_conf_ctx *ctx)
__releases(ctx->disk->queue->queue_lock) __releases(rcu)
__releases(&ctx->disk->queue->queue_lock) __releases(rcu)
{
spin_unlock_irq(ctx->disk->queue->queue_lock);
spin_unlock_irq(&ctx->disk->queue->queue_lock);
rcu_read_unlock();
put_disk_and_module(ctx->disk);
}
Expand Down Expand Up @@ -903,7 +903,7 @@ static int blkcg_print_stat(struct seq_file *sf, void *v)
*/
off += scnprintf(buf+off, size-off, "%s ", dname);

spin_lock_irq(blkg->q->queue_lock);
spin_lock_irq(&blkg->q->queue_lock);

rwstat = blkg_rwstat_recursive_sum(blkg, NULL,
offsetof(struct blkcg_gq, stat_bytes));
Expand All @@ -917,7 +917,7 @@ static int blkcg_print_stat(struct seq_file *sf, void *v)
wios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]);
dios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_DISCARD]);

spin_unlock_irq(blkg->q->queue_lock);
spin_unlock_irq(&blkg->q->queue_lock);

if (rbytes || wbytes || rios || wios) {
has_stats = true;
Expand Down Expand Up @@ -1038,9 +1038,9 @@ void blkcg_destroy_blkgs(struct blkcg *blkcg)
struct blkcg_gq, blkcg_node);
struct request_queue *q = blkg->q;

if (spin_trylock(q->queue_lock)) {
if (spin_trylock(&q->queue_lock)) {
blkg_destroy(blkg);
spin_unlock(q->queue_lock);
spin_unlock(&q->queue_lock);
} else {
spin_unlock_irq(&blkcg->lock);
cpu_relax();
Expand Down Expand Up @@ -1161,12 +1161,12 @@ int blkcg_init_queue(struct request_queue *q)

/* Make sure the root blkg exists. */
rcu_read_lock();
spin_lock_irq(q->queue_lock);
spin_lock_irq(&q->queue_lock);
blkg = blkg_create(&blkcg_root, q, new_blkg);
if (IS_ERR(blkg))
goto err_unlock;
q->root_blkg = blkg;
spin_unlock_irq(q->queue_lock);
spin_unlock_irq(&q->queue_lock);
rcu_read_unlock();

if (preloaded)
Expand All @@ -1185,7 +1185,7 @@ int blkcg_init_queue(struct request_queue *q)
blkg_destroy_all(q);
return ret;
err_unlock:
spin_unlock_irq(q->queue_lock);
spin_unlock_irq(&q->queue_lock);
rcu_read_unlock();
if (preloaded)
radix_tree_preload_end();
Expand All @@ -1200,7 +1200,7 @@ int blkcg_init_queue(struct request_queue *q)
*/
void blkcg_drain_queue(struct request_queue *q)
{
lockdep_assert_held(q->queue_lock);
lockdep_assert_held(&q->queue_lock);

/*
* @q could be exiting and already have destroyed all blkgs as
Expand Down Expand Up @@ -1335,7 +1335,7 @@ int blkcg_activate_policy(struct request_queue *q,
}
}

spin_lock_irq(q->queue_lock);
spin_lock_irq(&q->queue_lock);

list_for_each_entry(blkg, &q->blkg_list, q_node) {
struct blkg_policy_data *pd;
Expand All @@ -1347,7 +1347,7 @@ int blkcg_activate_policy(struct request_queue *q,
if (!pd)
swap(pd, pd_prealloc);
if (!pd) {
spin_unlock_irq(q->queue_lock);
spin_unlock_irq(&q->queue_lock);
goto pd_prealloc;
}

Expand All @@ -1361,7 +1361,7 @@ int blkcg_activate_policy(struct request_queue *q,
__set_bit(pol->plid, q->blkcg_pols);
ret = 0;

spin_unlock_irq(q->queue_lock);
spin_unlock_irq(&q->queue_lock);
out_bypass_end:
if (q->mq_ops)
blk_mq_unfreeze_queue(q);
Expand Down Expand Up @@ -1390,7 +1390,7 @@ void blkcg_deactivate_policy(struct request_queue *q,
if (q->mq_ops)
blk_mq_freeze_queue(q);

spin_lock_irq(q->queue_lock);
spin_lock_irq(&q->queue_lock);

__clear_bit(pol->plid, q->blkcg_pols);

Expand All @@ -1403,7 +1403,7 @@ void blkcg_deactivate_policy(struct request_queue *q,
}
}

spin_unlock_irq(q->queue_lock);
spin_unlock_irq(&q->queue_lock);

if (q->mq_ops)
blk_mq_unfreeze_queue(q);
Expand Down
10 changes: 1 addition & 9 deletions block/blk-core.c
Original file line number Diff line number Diff line change
Expand Up @@ -327,8 +327,6 @@ void blk_exit_queue(struct request_queue *q)
*/
void blk_cleanup_queue(struct request_queue *q)
{
spinlock_t *lock = q->queue_lock;

/* mark @q DYING, no new request or merges will be allowed afterwards */
mutex_lock(&q->sysfs_lock);
blk_set_queue_dying(q);
Expand Down Expand Up @@ -381,11 +379,6 @@ void blk_cleanup_queue(struct request_queue *q)

percpu_ref_exit(&q->q_usage_counter);

spin_lock_irq(lock);
if (q->queue_lock != &q->__queue_lock)
q->queue_lock = &q->__queue_lock;
spin_unlock_irq(lock);

/* @q is and will stay empty, shutdown and put */
blk_put_queue(q);
}
Expand Down Expand Up @@ -524,8 +517,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
mutex_init(&q->blk_trace_mutex);
#endif
mutex_init(&q->sysfs_lock);
spin_lock_init(&q->__queue_lock);
q->queue_lock = &q->__queue_lock;
spin_lock_init(&q->queue_lock);

init_waitqueue_head(&q->mq_freeze_wq);

Expand Down
Loading

0 comments on commit 0d945c1

Please sign in to comment.