block: split out request-only flags into a new namespace
Many of the REQ_* flags are only used on struct requests, and are only
of use to the block layer and a few drivers that dig into struct request
internals.

This patch adds a new req_flags_t rq_flags field to struct request for
them, and thus dramatically shrinks the number of common request flags. It
also removes the unfortunate situation where we have to fit the fields
from the same enum into 32 bits for struct bio and 64 bits for
struct request.

Signed-off-by: Christoph Hellwig <[email protected]>
Reviewed-by: Shaun Tancheff <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
Christoph Hellwig authored and axboe committed Oct 28, 2016
1 parent 8d2bbd4 commit e806402
Showing 38 changed files with 242 additions and 218 deletions.
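
The core of the change, in miniature: bio-visible REQ_* op flags stay in cmd_flags, while request-only state moves into a separate req_flags_t field with its own RQF_* namespace. The sketch below is a simplified, standalone model of that split; the flag bit values, the REQ_SYNC stand-in and the rq_is_quiet() helper are illustrative only and do not reproduce the kernel's actual headers.

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: request-private flags get their own type and namespace. */
typedef uint32_t req_flags_t;

#define RQF_SORTED	((req_flags_t)(1 << 0))	/* elevator knows about this rq */
#define RQF_STARTED	((req_flags_t)(1 << 1))	/* driver has started the rq */
#define RQF_QUIET	((req_flags_t)(1 << 2))	/* don't log completion errors */

/* Stand-in for the bio-visible REQ_* flags that remain in cmd_flags. */
#define REQ_SYNC	(1u << 3)

struct request {
	unsigned int cmd_flags;	/* op + REQ_* flags, shared with struct bio */
	req_flags_t rq_flags;	/* RQF_* flags, private to struct request */
};

static int rq_is_quiet(const struct request *rq)
{
	/* request-only state is now tested on rq_flags, not cmd_flags */
	return (rq->rq_flags & RQF_QUIET) != 0;
}

int main(void)
{
	struct request rq = { .cmd_flags = REQ_SYNC, .rq_flags = RQF_STARTED };

	rq.rq_flags |= RQF_QUIET;
	printf("quiet=%d\n", rq_is_quiet(&rq));
	return 0;
}
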
2 changes: 1 addition & 1 deletion Documentation/block/biodoc.txt
@@ -348,7 +348,7 @@ Drivers can now specify a request prepare function (q->prep_rq_fn) that the
block layer would invoke to pre-build device commands for a given request,
or perform other preparatory processing for the request. This routine is
called by elv_next_request(), i.e. typically just before servicing a request.
-(The prepare function would not be called for requests that have REQ_DONTPREP
+(The prepare function would not be called for requests that have RQF_DONTPREP
enabled)

Aside:
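
To make the documentation change above concrete, here is a hypothetical q->prep_rq_fn implementation in the style this commit enables. mydrv_prep_rq_fn() and mydrv_build_command() are made-up names; only the RQF_DONTPREP handling pattern is the point, and it mirrors how blk_peek_request() skips already-prepared requests and blk_unprep_request() clears the flag again.

#include <linux/blkdev.h>

/* Fictional driver-specific helper; assumed to return true on success. */
static bool mydrv_build_command(struct request *rq);

/*
 * Hypothetical prepare function: build the device command once, then set
 * RQF_DONTPREP so the block layer does not prepare the request again if it
 * is requeued.
 */
static int mydrv_prep_rq_fn(struct request_queue *q, struct request *rq)
{
	if (rq->rq_flags & RQF_DONTPREP)
		return BLKPREP_OK;	/* already prepared on an earlier pass */

	if (!mydrv_build_command(rq))
		return BLKPREP_DEFER;	/* resources busy, retry later */

	rq->rq_flags |= RQF_DONTPREP;
	return BLKPREP_OK;
}
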
71 changes: 37 additions & 34 deletions block/blk-core.c
@@ -145,13 +145,13 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
if (error)
bio->bi_error = error;

-	if (unlikely(rq->cmd_flags & REQ_QUIET))
+	if (unlikely(rq->rq_flags & RQF_QUIET))
bio_set_flag(bio, BIO_QUIET);

bio_advance(bio, nbytes);

/* don't actually finish bio if it's part of flush sequence */
-	if (bio->bi_iter.bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
+	if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
bio_endio(bio);
}

@@ -899,7 +899,7 @@ EXPORT_SYMBOL(blk_get_queue);

static inline void blk_free_request(struct request_list *rl, struct request *rq)
{
-	if (rq->cmd_flags & REQ_ELVPRIV) {
+	if (rq->rq_flags & RQF_ELVPRIV) {
elv_put_request(rl->q, rq);
if (rq->elv.icq)
put_io_context(rq->elv.icq->ioc);
@@ -961,14 +961,14 @@ static void __freed_request(struct request_list *rl, int sync)
* A request has just been released. Account for it, update the full and
* congestion status, wake up any waiters. Called under q->queue_lock.
*/
-static void freed_request(struct request_list *rl, int op, unsigned int flags)
+static void freed_request(struct request_list *rl, bool sync,
+		req_flags_t rq_flags)
{
struct request_queue *q = rl->q;
-	int sync = rw_is_sync(op, flags);

q->nr_rqs[sync]--;
rl->count[sync]--;
-	if (flags & REQ_ELVPRIV)
+	if (rq_flags & RQF_ELVPRIV)
q->nr_rqs_elvpriv--;

__freed_request(rl, sync);
@@ -1079,6 +1079,7 @@ static struct request *__get_request(struct request_list *rl, int op,
struct io_cq *icq = NULL;
const bool is_sync = rw_is_sync(op, op_flags) != 0;
int may_queue;
+	req_flags_t rq_flags = RQF_ALLOCED;

if (unlikely(blk_queue_dying(q)))
return ERR_PTR(-ENODEV);
@@ -1127,7 +1128,7 @@

/*
* Decide whether the new request will be managed by elevator. If
-	 * so, mark @op_flags and increment elvpriv. Non-zero elvpriv will
+	 * so, mark @rq_flags and increment elvpriv. Non-zero elvpriv will
* prevent the current elevator from being destroyed until the new
* request is freed. This guarantees icq's won't be destroyed and
* makes creating new ones safe.
@@ -1136,14 +1137,14 @@ static struct request *__get_request(struct request_list *rl, int op,
* it will be created after releasing queue_lock.
*/
if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) {
-		op_flags |= REQ_ELVPRIV;
+		rq_flags |= RQF_ELVPRIV;
q->nr_rqs_elvpriv++;
if (et->icq_cache && ioc)
icq = ioc_lookup_icq(ioc, q);
}

if (blk_queue_io_stat(q))
-		op_flags |= REQ_IO_STAT;
+		rq_flags |= RQF_IO_STAT;
spin_unlock_irq(q->queue_lock);

/* allocate and init request */
@@ -1153,10 +1154,11 @@ static struct request *__get_request(struct request_list *rl, int op,

blk_rq_init(q, rq);
blk_rq_set_rl(rq, rl);
-	req_set_op_attrs(rq, op, op_flags | REQ_ALLOCED);
+	req_set_op_attrs(rq, op, op_flags);
+	rq->rq_flags = rq_flags;

/* init elvpriv */
-	if (op_flags & REQ_ELVPRIV) {
+	if (rq_flags & RQF_ELVPRIV) {
if (unlikely(et->icq_cache && !icq)) {
if (ioc)
icq = ioc_create_icq(ioc, q, gfp_mask);
@@ -1195,7 +1197,7 @@ static struct request *__get_request(struct request_list *rl, int op,
printk_ratelimited(KERN_WARNING "%s: dev %s: request aux data allocation failed, iosched may be disturbed\n",
__func__, dev_name(q->backing_dev_info.dev));

-	rq->cmd_flags &= ~REQ_ELVPRIV;
+	rq->rq_flags &= ~RQF_ELVPRIV;
rq->elv.icq = NULL;

spin_lock_irq(q->queue_lock);
@@ -1212,7 +1214,7 @@ static struct request *__get_request(struct request_list *rl, int op,
* queue, but this is pretty rare.
*/
spin_lock_irq(q->queue_lock);
-	freed_request(rl, op, op_flags);
+	freed_request(rl, is_sync, rq_flags);

/*
* in the very unlikely event that allocation failed and no
@@ -1347,7 +1349,7 @@ void blk_requeue_request(struct request_queue *q, struct request *rq)
blk_clear_rq_complete(rq);
trace_block_rq_requeue(q, rq);

-	if (rq->cmd_flags & REQ_QUEUED)
+	if (rq->rq_flags & RQF_QUEUED)
blk_queue_end_tag(q, rq);

BUG_ON(blk_queued_rq(rq));
@@ -1409,7 +1411,7 @@ EXPORT_SYMBOL_GPL(part_round_stats);
#ifdef CONFIG_PM
static void blk_pm_put_request(struct request *rq)
{
-	if (rq->q->dev && !(rq->cmd_flags & REQ_PM) && !--rq->q->nr_pending)
+	if (rq->q->dev && !(rq->rq_flags & RQF_PM) && !--rq->q->nr_pending)
pm_runtime_mark_last_busy(rq->q->dev);
}
#else
@@ -1421,6 +1423,8 @@ static inline void blk_pm_put_request(struct request *rq) {}
*/
void __blk_put_request(struct request_queue *q, struct request *req)
{
+	req_flags_t rq_flags = req->rq_flags;
+
if (unlikely(!q))
return;

@@ -1440,16 +1444,15 @@ void __blk_put_request(struct request_queue *q, struct request *req)
* Request may not have originated from ll_rw_blk. if not,
* it didn't come out of our reserved rq pools
*/
-	if (req->cmd_flags & REQ_ALLOCED) {
-		unsigned int flags = req->cmd_flags;
-		int op = req_op(req);
+	if (rq_flags & RQF_ALLOCED) {
struct request_list *rl = blk_rq_rl(req);
+		bool sync = rw_is_sync(req_op(req), req->cmd_flags);

BUG_ON(!list_empty(&req->queuelist));
BUG_ON(ELV_ON_HASH(req));

blk_free_request(rl, req);
-		freed_request(rl, op, flags);
+		freed_request(rl, sync, rq_flags);
blk_put_rl(rl);
}
}
@@ -2214,7 +2217,7 @@ unsigned int blk_rq_err_bytes(const struct request *rq)
unsigned int bytes = 0;
struct bio *bio;

-	if (!(rq->cmd_flags & REQ_MIXED_MERGE))
+	if (!(rq->rq_flags & RQF_MIXED_MERGE))
return blk_rq_bytes(rq);

/*
@@ -2257,7 +2260,7 @@ void blk_account_io_done(struct request *req)
* normal IO on queueing nor completion. Accounting the
* containing request is enough.
*/
-	if (blk_do_io_stat(req) && !(req->cmd_flags & REQ_FLUSH_SEQ)) {
+	if (blk_do_io_stat(req) && !(req->rq_flags & RQF_FLUSH_SEQ)) {
unsigned long duration = jiffies - req->start_time;
const int rw = rq_data_dir(req);
struct hd_struct *part;
@@ -2285,7 +2288,7 @@ static struct request *blk_pm_peek_request(struct request_queue *q,
struct request *rq)
{
if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
-	    (q->rpm_status != RPM_ACTIVE && !(rq->cmd_flags & REQ_PM))))
+	    (q->rpm_status != RPM_ACTIVE && !(rq->rq_flags & RQF_PM))))
return NULL;
else
return rq;
@@ -2361,21 +2364,21 @@ struct request *blk_peek_request(struct request_queue *q)
if (!rq)
break;

-		if (!(rq->cmd_flags & REQ_STARTED)) {
+		if (!(rq->rq_flags & RQF_STARTED)) {
/*
* This is the first time the device driver
* sees this request (possibly after
* requeueing). Notify IO scheduler.
*/
-			if (rq->cmd_flags & REQ_SORTED)
+			if (rq->rq_flags & RQF_SORTED)
elv_activate_rq(q, rq);

/*
* just mark as started even if we don't start
* it, a request that has been delayed should
* not be passed by new incoming requests
*/
-			rq->cmd_flags |= REQ_STARTED;
+			rq->rq_flags |= RQF_STARTED;
trace_block_rq_issue(q, rq);
}

@@ -2384,7 +2387,7 @@ struct request *blk_peek_request(struct request_queue *q)
q->boundary_rq = NULL;
}

-		if (rq->cmd_flags & REQ_DONTPREP)
+		if (rq->rq_flags & RQF_DONTPREP)
break;

if (q->dma_drain_size && blk_rq_bytes(rq)) {
@@ -2407,11 +2410,11 @@ struct request *blk_peek_request(struct request_queue *q)
/*
* the request may have been (partially) prepped.
* we need to keep this request in the front to
-			 * avoid resource deadlock. REQ_STARTED will
+			 * avoid resource deadlock. RQF_STARTED will
* prevent other fs requests from passing this one.
*/
if (q->dma_drain_size && blk_rq_bytes(rq) &&
-			    !(rq->cmd_flags & REQ_DONTPREP)) {
+			    !(rq->rq_flags & RQF_DONTPREP)) {
/*
* remove the space for the drain we added
* so that we don't add it again
@@ -2424,7 +2427,7 @@ struct request *blk_peek_request(struct request_queue *q)
} else if (ret == BLKPREP_KILL || ret == BLKPREP_INVALID) {
int err = (ret == BLKPREP_INVALID) ? -EREMOTEIO : -EIO;

-			rq->cmd_flags |= REQ_QUIET;
+			rq->rq_flags |= RQF_QUIET;
/*
* Mark this request as started so we don't trigger
* any debug logic in the end I/O path.
@@ -2561,7 +2564,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
req->errors = 0;

if (error && req->cmd_type == REQ_TYPE_FS &&
-	    !(req->cmd_flags & REQ_QUIET)) {
+	    !(req->rq_flags & RQF_QUIET)) {
char *error_type;

switch (error) {
@@ -2634,7 +2637,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
req->__sector += total_bytes >> 9;

/* mixed attributes always follow the first bio */
-	if (req->cmd_flags & REQ_MIXED_MERGE) {
+	if (req->rq_flags & RQF_MIXED_MERGE) {
req->cmd_flags &= ~REQ_FAILFAST_MASK;
req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
}
@@ -2687,7 +2690,7 @@ void blk_unprep_request(struct request *req)
{
struct request_queue *q = req->q;

-	req->cmd_flags &= ~REQ_DONTPREP;
+	req->rq_flags &= ~RQF_DONTPREP;
if (q->unprep_rq_fn)
q->unprep_rq_fn(q, req);
}
@@ -2698,7 +2701,7 @@ EXPORT_SYMBOL_GPL(blk_unprep_request);
*/
void blk_finish_request(struct request *req, int error)
{
-	if (req->cmd_flags & REQ_QUEUED)
+	if (req->rq_flags & RQF_QUEUED)
blk_queue_end_tag(req->q, req);

BUG_ON(blk_queued_rq(req));
@@ -2708,7 +2711,7 @@ void blk_finish_request(struct request *req, int error)

blk_delete_timer(req);

-	if (req->cmd_flags & REQ_DONTPREP)
+	if (req->rq_flags & RQF_DONTPREP)
blk_unprep_request(req);

blk_account_io_done(req);
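
A detail worth calling out in the __blk_put_request() hunk above: rq_flags and the sync classification are captured before blk_free_request() returns the request to its pool, because freed_request() still needs those values once the request itself can no longer be touched. The standalone model below shows that ordering; all *_model names, types and bit values are illustrative stand-ins, not kernel APIs.

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

typedef uint32_t req_flags_t;
#define RQF_ALLOCED	((req_flags_t)(1 << 0))	/* illustrative bit value */
#define RQF_ELVPRIV	((req_flags_t)(1 << 1))	/* illustrative bit value */

struct request { req_flags_t rq_flags; bool sync; };
struct request_list { int count[2]; int elvpriv; };

/* Frees the request; after this call rq must not be dereferenced. */
static void blk_free_request_model(struct request *rq)
{
	free(rq);
}

/* Accounting only needs the saved values, never the freed request. */
static void freed_request_model(struct request_list *rl, bool sync,
				req_flags_t rq_flags)
{
	rl->count[sync]--;
	if (rq_flags & RQF_ELVPRIV)
		rl->elvpriv--;
}

static void put_request_model(struct request_list *rl, struct request *rq)
{
	/* snapshot everything the accounting needs before freeing */
	req_flags_t rq_flags = rq->rq_flags;
	bool sync = rq->sync;

	if (rq_flags & RQF_ALLOCED) {
		blk_free_request_model(rq);
		freed_request_model(rl, sync, rq_flags);
	}
}
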
2 changes: 1 addition & 1 deletion block/blk-exec.c
@@ -72,7 +72,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
spin_lock_irq(q->queue_lock);

if (unlikely(blk_queue_dying(q))) {
-		rq->cmd_flags |= REQ_QUIET;
+		rq->rq_flags |= RQF_QUIET;
rq->errors = -ENXIO;
__blk_end_request_all(rq, rq->errors);
spin_unlock_irq(q->queue_lock);
9 changes: 5 additions & 4 deletions block/blk-flush.c
@@ -56,7 +56,7 @@
* Once while executing DATA and again after the whole sequence is
* complete. The first completion updates the contained bio but doesn't
* finish it so that the bio submitter is notified only after the whole
- * sequence is complete. This is implemented by testing REQ_FLUSH_SEQ in
+ * sequence is complete. This is implemented by testing RQF_FLUSH_SEQ in
* req_bio_endio().
*
* The above peculiarity requires that each FLUSH/FUA request has only one
@@ -127,7 +127,7 @@ static void blk_flush_restore_request(struct request *rq)
rq->bio = rq->biotail;

/* make @rq a normal request */
-	rq->cmd_flags &= ~REQ_FLUSH_SEQ;
+	rq->rq_flags &= ~RQF_FLUSH_SEQ;
rq->end_io = rq->flush.saved_end_io;
}

@@ -330,7 +330,8 @@ static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq)
}

flush_rq->cmd_type = REQ_TYPE_FS;
-	req_set_op_attrs(flush_rq, REQ_OP_FLUSH, WRITE_FLUSH | REQ_FLUSH_SEQ);
+	req_set_op_attrs(flush_rq, REQ_OP_FLUSH, WRITE_FLUSH);
+	flush_rq->rq_flags |= RQF_FLUSH_SEQ;
flush_rq->rq_disk = first_rq->rq_disk;
flush_rq->end_io = flush_end_io;

@@ -433,7 +434,7 @@ void blk_insert_flush(struct request *rq)
*/
memset(&rq->flush, 0, sizeof(rq->flush));
INIT_LIST_HEAD(&rq->flush.list);
-	rq->cmd_flags |= REQ_FLUSH_SEQ;
+	rq->rq_flags |= RQF_FLUSH_SEQ;
rq->flush.saved_end_io = rq->end_io; /* Usually NULL */
if (q->mq_ops) {
rq->end_io = mq_flush_data_end_io;
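
The blk-flush.c comment above describes why RQF_FLUSH_SEQ exists: a data request embedded in a flush sequence completes its bio only once the whole PREFLUSH/DATA/POSTFLUSH sequence is done. Below is a minimal standalone model of that rule, mirroring the test in req_bio_endio(); the types and the bit value are illustrative, not the kernel's.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t req_flags_t;
#define RQF_FLUSH_SEQ	((req_flags_t)(1 << 4))	/* illustrative bit value */

struct bio_model { unsigned int remaining; bool ended; };
struct rq_model { req_flags_t rq_flags; struct bio_model *bio; };

/*
 * Mirrors the rule in req_bio_endio(): even when the bio has no bytes left,
 * do not end it if the request is part of a flush sequence; the flush
 * machinery ends it after the whole sequence has completed.
 */
static void bio_advance_and_maybe_end(struct rq_model *rq, unsigned int nbytes)
{
	struct bio_model *bio = rq->bio;

	bio->remaining -= nbytes;
	if (bio->remaining == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
		bio->ended = true;
}

int main(void)
{
	struct bio_model bio = { .remaining = 4096, .ended = false };
	struct rq_model rq = { .rq_flags = RQF_FLUSH_SEQ, .bio = &bio };

	bio_advance_and_maybe_end(&rq, 4096);
	printf("bio ended early: %d\n", bio.ended);	/* 0: deferred to flush completion */
	return 0;
}
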
4 changes: 2 additions & 2 deletions block/blk-map.c
@@ -135,7 +135,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
} while (iov_iter_count(&i));

if (!bio_flagged(bio, BIO_USER_MAPPED))
-		rq->cmd_flags |= REQ_COPY_USER;
+		rq->rq_flags |= RQF_COPY_USER;
return 0;

unmap_rq:
@@ -232,7 +232,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

if (do_copy)
-		rq->cmd_flags |= REQ_COPY_USER;
+		rq->rq_flags |= RQF_COPY_USER;

ret = blk_rq_append_bio(rq, bio);
if (unlikely(ret)) {
8 changes: 4 additions & 4 deletions block/blk-merge.c
@@ -456,7 +456,7 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
if (rq->bio)
nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);

-	if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
+	if (unlikely(rq->rq_flags & RQF_COPY_USER) &&
(blk_rq_bytes(rq) & q->dma_pad_mask)) {
unsigned int pad_len =
(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;
@@ -634,7 +634,7 @@ void blk_rq_set_mixed_merge(struct request *rq)
unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
struct bio *bio;

-	if (rq->cmd_flags & REQ_MIXED_MERGE)
+	if (rq->rq_flags & RQF_MIXED_MERGE)
return;

/*
Expand All @@ -647,7 +647,7 @@ void blk_rq_set_mixed_merge(struct request *rq)
(bio->bi_opf & REQ_FAILFAST_MASK) != ff);
bio->bi_opf |= ff;
}
-	rq->cmd_flags |= REQ_MIXED_MERGE;
+	rq->rq_flags |= RQF_MIXED_MERGE;
}

static void blk_account_io_merge(struct request *req)
@@ -709,7 +709,7 @@ static int attempt_merge(struct request_queue *q, struct request *req,
* makes sure that all involved bios have mixable attributes
* set properly.
*/
-	if ((req->cmd_flags | next->cmd_flags) & REQ_MIXED_MERGE ||
+	if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) ||
(req->cmd_flags & REQ_FAILFAST_MASK) !=
(next->cmd_flags & REQ_FAILFAST_MASK)) {
blk_rq_set_mixed_merge(req);