block: rename bio bi_rw to bi_opf
Since commit 63a4cc2, bio->bi_rw contains flags in the lower
portion and the op code in the higher portion. This means that
old code that relies on manually setting bi_rw is most likely
going to be broken. Instead of letting that brokenness linger,
rename the member, to force old and out-of-tree code to break
at compile time instead of at runtime.

No intended functional changes in this commit.

Signed-off-by: Jens Axboe <[email protected]>
axboe committed Aug 7, 2016
1 parent 31c64f7 commit 1eff9d3
Showing 51 changed files with 158 additions and 157 deletions.
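
For readers porting out-of-tree code, the shape of the change is easy to see in miniature. The following is an editor's sketch, not part of the commit; it uses only helpers that appear in the hunks below (bio_op(), bio_set_op_attrs(), and the REQ_* flags):

#include <linux/bio.h>

/*
 * After the rename: the op code lives in the high bits of bi_opf and is
 * read with bio_op(); the request flags live in the low bits and are
 * tested directly on bi_opf. Code that read or assigned bio->bi_rw
 * directly no longer compiles, which is exactly the point of the rename.
 */
static bool bio_is_sync_read(struct bio *bio)
{
        if (bio_op(bio) != REQ_OP_READ)         /* op code: high bits */
                return false;
        return (bio->bi_opf & REQ_SYNC) != 0;   /* flags: low bits */
}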
4 changes: 2 additions & 2 deletions Documentation/block/biodoc.txt
@@ -269,7 +269,7 @@ Arjan's proposed request priority scheme allows higher levels some broad
 requests which haven't aged too much on the queue. Potentially this priority
 could even be exposed to applications in some manner, providing higher level
 tunability. Time based aging avoids starvation of lower priority
-requests. Some bits in the bi_rw flags field in the bio structure are
+requests. Some bits in the bi_opf flags field in the bio structure are
 intended to be used for this priority information.

@@ -432,7 +432,7 @@ struct bio {
        struct bio          *bi_next;    /* request queue link */
        struct block_device *bi_bdev;    /* target device */
        unsigned long       bi_flags;    /* status, command, etc */
-       unsigned long       bi_rw;       /* low bits: r/w, high: priority */
+       unsigned long       bi_opf;      /* low bits: r/w, high: priority */

        unsigned int        bi_vcnt;     /* how many bio_vec's */
        struct bvec_iter    bi_iter;     /* current index into bio_vec array */
2 changes: 1 addition & 1 deletion Documentation/device-mapper/dm-flakey.txt
@@ -42,7 +42,7 @@ Optional feature parameters:
            <direction>: Either 'r' to corrupt reads or 'w' to corrupt writes.
                         'w' is incompatible with drop_writes.
            <value>: The value (from 0-255) to write.
-           <flags>: Perform the replacement only if bio->bi_rw has all the
+           <flags>: Perform the replacement only if bio->bi_opf has all the
                     selected flags set.

 Examples:
2 changes: 1 addition & 1 deletion block/bio-integrity.c
@@ -86,7 +86,7 @@ struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,

         bip->bip_bio = bio;
         bio->bi_integrity = bip;
-        bio->bi_rw |= REQ_INTEGRITY;
+        bio->bi_opf |= REQ_INTEGRITY;

         return bip;
 err:
6 changes: 3 additions & 3 deletions block/bio.c
@@ -580,7 +580,7 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
          */
         bio->bi_bdev = bio_src->bi_bdev;
         bio_set_flag(bio, BIO_CLONED);
-        bio->bi_rw = bio_src->bi_rw;
+        bio->bi_opf = bio_src->bi_opf;
         bio->bi_iter = bio_src->bi_iter;
         bio->bi_io_vec = bio_src->bi_io_vec;

@@ -663,7 +663,7 @@ struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
         if (!bio)
                 return NULL;
         bio->bi_bdev = bio_src->bi_bdev;
-        bio->bi_rw = bio_src->bi_rw;
+        bio->bi_opf = bio_src->bi_opf;
         bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
         bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;

@@ -873,7 +873,7 @@ int submit_bio_wait(struct bio *bio)
         init_completion(&ret.event);
         bio->bi_private = &ret;
         bio->bi_end_io = submit_bio_wait_endio;
-        bio->bi_rw |= REQ_SYNC;
+        bio->bi_opf |= REQ_SYNC;
         submit_bio(bio);
         wait_for_completion_io(&ret.event);
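
As the submit_bio_wait() hunk above shows, the function ORs REQ_SYNC into bi_opf itself, so a caller only sets the op and any other flags. A hedged usage sketch follows (the helper write_page_sync() is hypothetical; bio_alloc(), bio_add_page(), bio_set_op_attrs(), and submit_bio_wait() are the kernel helpers visible in this diff):

static int write_page_sync(struct block_device *bdev, sector_t sector,
                           struct page *page)
{
        struct bio *bio = bio_alloc(GFP_NOIO, 1);
        int ret;

        bio->bi_bdev = bdev;
        bio->bi_iter.bi_sector = sector;
        bio_add_page(bio, page, PAGE_SIZE, 0);
        bio_set_op_attrs(bio, REQ_OP_WRITE, 0); /* op + flags into bi_opf */

        ret = submit_bio_wait(bio);             /* ORs REQ_SYNC into bi_opf */
        bio_put(bio);
        return ret;
}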
26 changes: 13 additions & 13 deletions block/blk-core.c
@@ -1029,7 +1029,7 @@ static bool blk_rq_should_init_elevator(struct bio *bio)
          * Flush requests do not use the elevator so skip initialization.
          * This allows a request to share the flush and elevator data.
          */
-        if (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA))
+        if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA))
                 return false;

         return true;

@@ -1504,7 +1504,7 @@ EXPORT_SYMBOL_GPL(blk_add_request_payload);
 bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
                             struct bio *bio)
 {
-        const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
+        const int ff = bio->bi_opf & REQ_FAILFAST_MASK;

         if (!ll_back_merge_fn(q, req, bio))
                 return false;

@@ -1526,7 +1526,7 @@ bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
 bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
                              struct bio *bio)
 {
-        const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
+        const int ff = bio->bi_opf & REQ_FAILFAST_MASK;

         if (!ll_front_merge_fn(q, req, bio))
                 return false;

@@ -1648,8 +1648,8 @@ void init_request_from_bio(struct request *req, struct bio *bio)
 {
         req->cmd_type = REQ_TYPE_FS;

-        req->cmd_flags |= bio->bi_rw & REQ_COMMON_MASK;
-        if (bio->bi_rw & REQ_RAHEAD)
+        req->cmd_flags |= bio->bi_opf & REQ_COMMON_MASK;
+        if (bio->bi_opf & REQ_RAHEAD)
                 req->cmd_flags |= REQ_FAILFAST_MASK;

         req->errors = 0;

@@ -1660,7 +1660,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)

 static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
 {
-        const bool sync = !!(bio->bi_rw & REQ_SYNC);
+        const bool sync = !!(bio->bi_opf & REQ_SYNC);
         struct blk_plug *plug;
         int el_ret, rw_flags = 0, where = ELEVATOR_INSERT_SORT;
         struct request *req;

@@ -1681,7 +1681,7 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
                 return BLK_QC_T_NONE;
         }

-        if (bio->bi_rw & (REQ_PREFLUSH | REQ_FUA)) {
+        if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA)) {
                 spin_lock_irq(q->queue_lock);
                 where = ELEVATOR_INSERT_FLUSH;
                 goto get_rq;

@@ -1728,7 +1728,7 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
         /*
          * Add in META/PRIO flags, if set, before we get to the IO scheduler
          */
-        rw_flags |= (bio->bi_rw & (REQ_META | REQ_PRIO));
+        rw_flags |= (bio->bi_opf & (REQ_META | REQ_PRIO));

         /*
          * Grab a free request. This might sleep but can not fail.

@@ -1805,7 +1805,7 @@ static void handle_bad_sector(struct bio *bio)
         printk(KERN_INFO "attempt to access beyond end of device\n");
         printk(KERN_INFO "%s: rw=%d, want=%Lu, limit=%Lu\n",
                         bdevname(bio->bi_bdev, b),
-                        bio->bi_rw,
+                        bio->bi_opf,
                         (unsigned long long)bio_end_sector(bio),
                         (long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9));
 }

@@ -1918,9 +1918,9 @@ generic_make_request_checks(struct bio *bio)
          * drivers without flush support don't have to worry
          * about them.
          */
-        if ((bio->bi_rw & (REQ_PREFLUSH | REQ_FUA)) &&
+        if ((bio->bi_opf & (REQ_PREFLUSH | REQ_FUA)) &&
             !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
-                bio->bi_rw &= ~(REQ_PREFLUSH | REQ_FUA);
+                bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
                 if (!nr_sectors) {
                         err = 0;
                         goto end_io;

@@ -2219,7 +2219,7 @@ unsigned int blk_rq_err_bytes(const struct request *rq)
          * one.
          */
         for (bio = rq->bio; bio; bio = bio->bi_next) {
-                if ((bio->bi_rw & ff) != ff)
+                if ((bio->bi_opf & ff) != ff)
                         break;
                 bytes += bio->bi_iter.bi_size;
         }

@@ -2630,7 +2630,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
         /* mixed attributes always follow the first bio */
         if (req->cmd_flags & REQ_MIXED_MERGE) {
                 req->cmd_flags &= ~REQ_FAILFAST_MASK;
-                req->cmd_flags |= req->bio->bi_rw & REQ_FAILFAST_MASK;
+                req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
         }

         /*
8 changes: 4 additions & 4 deletions block/blk-merge.c
@@ -186,7 +186,7 @@ void blk_queue_split(struct request_queue *q, struct bio **bio,

         if (split) {
                 /* there isn't a chance to merge the split bio */
-                split->bi_rw |= REQ_NOMERGE;
+                split->bi_opf |= REQ_NOMERGE;

                 bio_chain(split, *bio);
                 trace_block_split(q, split, (*bio)->bi_iter.bi_sector);

@@ -616,9 +616,9 @@ void blk_rq_set_mixed_merge(struct request *rq)
          * Distributes the attributes to each bio.
          */
         for (bio = rq->bio; bio; bio = bio->bi_next) {
-                WARN_ON_ONCE((bio->bi_rw & REQ_FAILFAST_MASK) &&
-                             (bio->bi_rw & REQ_FAILFAST_MASK) != ff);
-                bio->bi_rw |= ff;
+                WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
+                             (bio->bi_opf & REQ_FAILFAST_MASK) != ff);
+                bio->bi_opf |= ff;
         }
         rq->cmd_flags |= REQ_MIXED_MERGE;
 }
10 changes: 5 additions & 5 deletions block/blk-mq.c
@@ -1234,7 +1234,7 @@ static struct request *blk_mq_map_request(struct request_queue *q,
         ctx = blk_mq_get_ctx(q);
         hctx = q->mq_ops->map_queue(q, ctx->cpu);

-        if (rw_is_sync(bio_op(bio), bio->bi_rw))
+        if (rw_is_sync(bio_op(bio), bio->bi_opf))
                 op_flags |= REQ_SYNC;

         trace_block_getrq(q, bio, op);

@@ -1302,8 +1302,8 @@ static int blk_mq_direct_issue_request(struct request *rq, blk_qc_t *cookie)
  */
 static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 {
-        const int is_sync = rw_is_sync(bio_op(bio), bio->bi_rw);
-        const int is_flush_fua = bio->bi_rw & (REQ_PREFLUSH | REQ_FUA);
+        const int is_sync = rw_is_sync(bio_op(bio), bio->bi_opf);
+        const int is_flush_fua = bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
         struct blk_map_ctx data;
         struct request *rq;
         unsigned int request_count = 0;

@@ -1396,8 +1396,8 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
  */
 static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
 {
-        const int is_sync = rw_is_sync(bio_op(bio), bio->bi_rw);
-        const int is_flush_fua = bio->bi_rw & (REQ_PREFLUSH | REQ_FUA);
+        const int is_sync = rw_is_sync(bio_op(bio), bio->bi_opf);
+        const int is_flush_fua = bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
         struct blk_plug *plug;
         unsigned int request_count = 0;
         struct blk_map_ctx data;
8 changes: 4 additions & 4 deletions block/blk-throttle.c
@@ -821,8 +821,8 @@ static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
          * second time when it eventually gets issued. Set it when a bio
          * is being charged to a tg.
          */
-        if (!(bio->bi_rw & REQ_THROTTLED))
-                bio->bi_rw |= REQ_THROTTLED;
+        if (!(bio->bi_opf & REQ_THROTTLED))
+                bio->bi_opf |= REQ_THROTTLED;
 }

 /**

@@ -1399,7 +1399,7 @@ bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
         WARN_ON_ONCE(!rcu_read_lock_held());

         /* see throtl_charge_bio() */
-        if ((bio->bi_rw & REQ_THROTTLED) || !tg->has_rules[rw])
+        if ((bio->bi_opf & REQ_THROTTLED) || !tg->has_rules[rw])
                 goto out;

         spin_lock_irq(q->queue_lock);

@@ -1478,7 +1478,7 @@ bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
          * being issued.
          */
         if (!throttled)
-                bio->bi_rw &= ~REQ_THROTTLED;
+                bio->bi_opf &= ~REQ_THROTTLED;
         return throttled;
 }

4 changes: 2 additions & 2 deletions block/cfq-iosched.c
@@ -918,7 +918,7 @@ static inline struct cfq_data *cic_to_cfqd(struct cfq_io_cq *cic)
  */
 static inline bool cfq_bio_sync(struct bio *bio)
 {
-        return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC);
+        return bio_data_dir(bio) == READ || (bio->bi_opf & REQ_SYNC);
 }

 /*

@@ -2565,7 +2565,7 @@ static void cfq_merged_request(struct request_queue *q, struct request *req,
 static void cfq_bio_merged(struct request_queue *q, struct request *req,
                            struct bio *bio)
 {
-        cfqg_stats_update_io_merged(RQ_CFQG(req), bio_op(bio), bio->bi_rw);
+        cfqg_stats_update_io_merged(RQ_CFQG(req), bio_op(bio), bio->bi_opf);
 }

 static void
8 changes: 4 additions & 4 deletions drivers/block/drbd/drbd_main.c
@@ -1663,13 +1663,13 @@ static u32 bio_flags_to_wire(struct drbd_connection *connection,
                              struct bio *bio)
 {
         if (connection->agreed_pro_version >= 95)
-                return  (bio->bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
-                        (bio->bi_rw & REQ_FUA ? DP_FUA : 0) |
-                        (bio->bi_rw & REQ_PREFLUSH ? DP_FLUSH : 0) |
+                return  (bio->bi_opf & REQ_SYNC ? DP_RW_SYNC : 0) |
+                        (bio->bi_opf & REQ_FUA ? DP_FUA : 0) |
+                        (bio->bi_opf & REQ_PREFLUSH ? DP_FLUSH : 0) |
                         (bio_op(bio) == REQ_OP_WRITE_SAME ? DP_WSAME : 0) |
                         (bio_op(bio) == REQ_OP_DISCARD ? DP_DISCARD : 0);
         else
-                return bio->bi_rw & REQ_SYNC ? DP_RW_SYNC : 0;
+                return bio->bi_opf & REQ_SYNC ? DP_RW_SYNC : 0;
 }

 /* Used to send write or TRIM aka REQ_DISCARD requests
2 changes: 1 addition & 1 deletion drivers/block/drbd/drbd_receiver.c
@@ -1564,7 +1564,7 @@ static void drbd_issue_peer_wsame(struct drbd_device *device,
  * drbd_submit_peer_request()
  * @device: DRBD device.
  * @peer_req: peer request
- * @rw: flag field, see bio->bi_rw
+ * @rw: flag field, see bio->bi_opf
  *
  * May spread the pages to multiple bios,
  * depending on bio_add_page restrictions.
6 changes: 3 additions & 3 deletions drivers/block/drbd/drbd_req.c
@@ -288,7 +288,7 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
          */
         if (!ok &&
             bio_op(req->master_bio) == REQ_OP_READ &&
-            !(req->master_bio->bi_rw & REQ_RAHEAD) &&
+            !(req->master_bio->bi_opf & REQ_RAHEAD) &&
             !list_empty(&req->tl_requests))
                 req->rq_state |= RQ_POSTPONED;

@@ -1137,7 +1137,7 @@ static int drbd_process_write_request(struct drbd_request *req)
          * replicating, in which case there is no point. */
         if (unlikely(req->i.size == 0)) {
                 /* The only size==0 bios we expect are empty flushes. */
-                D_ASSERT(device, req->master_bio->bi_rw & REQ_PREFLUSH);
+                D_ASSERT(device, req->master_bio->bi_opf & REQ_PREFLUSH);
                 if (remote)
                         _req_mod(req, QUEUE_AS_DRBD_BARRIER);
                 return remote;

@@ -1176,7 +1176,7 @@ drbd_submit_req_private_bio(struct drbd_request *req)

         if (bio_op(bio) != REQ_OP_READ)
                 type = DRBD_FAULT_DT_WR;
-        else if (bio->bi_rw & REQ_RAHEAD)
+        else if (bio->bi_opf & REQ_RAHEAD)
                 type = DRBD_FAULT_DT_RA;
         else
                 type = DRBD_FAULT_DT_RD;
2 changes: 1 addition & 1 deletion drivers/block/drbd/drbd_worker.c
@@ -256,7 +256,7 @@ void drbd_request_endio(struct bio *bio)
                 what = DISCARD_COMPLETED_WITH_ERROR;
                 break;
         case REQ_OP_READ:
-                if (bio->bi_rw & REQ_RAHEAD)
+                if (bio->bi_opf & REQ_RAHEAD)
                         what = READ_AHEAD_COMPLETED_WITH_ERROR;
                 else
                         what = READ_COMPLETED_WITH_ERROR;
2 changes: 1 addition & 1 deletion drivers/block/pktcdvd.c
@@ -1157,7 +1157,7 @@ static int pkt_start_recovery(struct packet_data *pkt)

         bio_reset(pkt->bio);
         pkt->bio->bi_bdev = pd->bdev;
-        pkt->bio->bi_rw = REQ_WRITE;
+        bio_set_op_attrs(pkt->bio, REQ_OP_WRITE, 0);
         pkt->bio->bi_iter.bi_sector = new_sector;
         pkt->bio->bi_iter.bi_size = pkt->frames * CD_FRAMESIZE;
         pkt->bio->bi_vcnt = pkt->frames;
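
The pktcdvd hunk above is one of the few places where this commit is more than a mechanical rename: the raw assignment pkt->bio->bi_rw = REQ_WRITE, exactly the pattern the commit message warns about, becomes a bio_set_op_attrs() call. Sketching its effect under the layout the commit message describes (op in the higher portion of bi_opf, flags in the lower), the helper sets both halves at once so the op can no longer be wiped out by a plain flag assignment:

        bio_set_op_attrs(bio, REQ_OP_WRITE, 0); /* was: bio->bi_rw = REQ_WRITE */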
2 changes: 1 addition & 1 deletion drivers/block/umem.c
@@ -535,7 +535,7 @@ static blk_qc_t mm_make_request(struct request_queue *q, struct bio *bio)
         *card->biotail = bio;
         bio->bi_next = NULL;
         card->biotail = &bio->bi_next;
-        if (bio->bi_rw & REQ_SYNC || !mm_check_plugged(card))
+        if (bio->bi_opf & REQ_SYNC || !mm_check_plugged(card))
                 activate(card);
         spin_unlock_irq(&card->lock);
12 changes: 6 additions & 6 deletions drivers/md/bcache/request.c
@@ -208,7 +208,7 @@ static void bch_data_insert_start(struct closure *cl)
          * Journal writes are marked REQ_PREFLUSH; if the original write was a
          * flush, it'll wait on the journal write.
          */
-        bio->bi_rw &= ~(REQ_PREFLUSH|REQ_FUA);
+        bio->bi_opf &= ~(REQ_PREFLUSH|REQ_FUA);

         do {
                 unsigned i;

@@ -405,7 +405,7 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
         if (!congested &&
             mode == CACHE_MODE_WRITEBACK &&
             op_is_write(bio_op(bio)) &&
-            (bio->bi_rw & REQ_SYNC))
+            (bio->bi_opf & REQ_SYNC))
                 goto rescale;

         spin_lock(&dc->io_lock);

@@ -668,7 +668,7 @@ static inline struct search *search_alloc(struct bio *bio,
         s->iop.write_prio = 0;
         s->iop.error = 0;
         s->iop.flags = 0;
-        s->iop.flush_journal = (bio->bi_rw & (REQ_PREFLUSH|REQ_FUA)) != 0;
+        s->iop.flush_journal = (bio->bi_opf & (REQ_PREFLUSH|REQ_FUA)) != 0;
         s->iop.wq = bcache_wq;

         return s;

@@ -796,8 +796,8 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
                 goto out_submit;
         }

-        if (!(bio->bi_rw & REQ_RAHEAD) &&
-            !(bio->bi_rw & REQ_META) &&
+        if (!(bio->bi_opf & REQ_RAHEAD) &&
+            !(bio->bi_opf & REQ_META) &&
             s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
                 reada = min_t(sector_t, dc->readahead >> 9,
                               bdev_sectors(bio->bi_bdev) - bio_end_sector(bio));

@@ -920,7 +920,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
                 bch_writeback_add(dc);
                 s->iop.bio = bio;

-                if (bio->bi_rw & REQ_PREFLUSH) {
+                if (bio->bi_opf & REQ_PREFLUSH) {
                         /* Also need to send a flush to the backing device */
                         struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0,
                                                              dc->disk.bio_split);
(Diff truncated; the remaining changed files are not shown.)