block: get rid of bio_rw and READA
These two are confusing leftovers of the old world order, combining
values from the REQ_OP_ and REQ_ namespaces.  For callers that don't
special-case readahead we mostly just replace bi_rw with bio_data_dir
or op_is_write, except for the few cases where a switch over the
REQ_OP_ values makes more sense.  Any check for READA is replaced with
an explicit check for REQ_RAHEAD.  Also remove the READA alias for
REQ_RAHEAD.

Signed-off-by: Christoph Hellwig <[email protected]>
Reviewed-by: Johannes Thumshirn <[email protected]>
Reviewed-by: Mike Christie <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
Christoph Hellwig authored and axboe committed Jul 20, 2016
1 parent 3f40bf2 commit 7024628
Showing 26 changed files with 95 additions and 96 deletions.
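
For orientation, the conversion pattern applied throughout the diffs below can be sketched as follows. This is a minimal illustrative fragment, not part of the patch; handle_read(), handle_readahead() and handle_write() are hypothetical helpers standing in for per-driver logic:

	/*
	 * Illustrative sketch only -- not part of this patch.
	 * handle_read/handle_readahead/handle_write are hypothetical
	 * helpers standing in for per-driver logic.
	 *
	 * Old world: bio_rw() returned READ, WRITE, or READA, mixing the
	 * operation with the read-ahead hint:
	 *
	 *	rw = bio_rw(bio);
	 *	if (rw == READA)
	 *		rw = READ;
	 *
	 * New world: query the operation and the hint separately.
	 */
	static void classify_bio(struct bio *bio)
	{
		if (op_is_write(bio_op(bio)))		/* data direction */
			handle_write(bio);
		else if (bio->bi_rw & REQ_RAHEAD)	/* read-ahead is now a flag */
			handle_readahead(bio);		/* ...on top of REQ_OP_READ */
		else
			handle_read(bio);
	}

Drivers that need to distinguish more than two cases (discard vs. read vs. write) switch over the REQ_OP_ values instead, as drbd_request_endio() and zero_map() do below.
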
4 changes: 1 addition & 3 deletions drivers/block/brd.c
@@ -347,9 +347,7 @@ static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
 		goto out;
 	}
 
-	rw = bio_rw(bio);
-	if (rw == READA)
-		rw = READ;
+	rw = bio_data_dir(bio);
 
 	bio_for_each_segment(bvec, bio, iter) {
 		unsigned int len = bvec.bv_len;
34 changes: 19 additions & 15 deletions drivers/block/drbd/drbd_req.c
@@ -219,7 +219,6 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
 {
 	const unsigned s = req->rq_state;
 	struct drbd_device *device = req->device;
-	int rw;
 	int error, ok;
 
 	/* we must not complete the master bio, while it is
@@ -243,8 +242,6 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
 		return;
 	}
 
-	rw = bio_rw(req->master_bio);
-
 	/*
 	 * figure out whether to report success or failure.
 	 *
@@ -268,7 +265,7 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
 	 * epoch number.  If they match, increase the current_tle_nr,
 	 * and reset the transfer log epoch write_cnt.
 	 */
-	if (rw == WRITE &&
+	if (op_is_write(bio_op(req->master_bio)) &&
 	    req->epoch == atomic_read(&first_peer_device(device)->connection->current_tle_nr))
 		start_new_tl_epoch(first_peer_device(device)->connection);
 
@@ -285,11 +282,14 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
 	 * because no path was available, in which case
 	 * it was not even added to the transfer_log.
 	 *
-	 * READA may fail, and will not be retried.
+	 * read-ahead may fail, and will not be retried.
 	 *
 	 * WRITE should have used all available paths already.
 	 */
-	if (!ok && rw == READ && !list_empty(&req->tl_requests))
+	if (!ok &&
+	    bio_op(req->master_bio) == REQ_OP_READ &&
+	    !(req->master_bio->bi_rw & REQ_RAHEAD) &&
+	    !list_empty(&req->tl_requests))
 		req->rq_state |= RQ_POSTPONED;
 
 	if (!(req->rq_state & RQ_POSTPONED)) {
@@ -645,7 +645,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		__drbd_chk_io_error(device, DRBD_READ_ERROR);
 		/* fall through. */
 	case READ_AHEAD_COMPLETED_WITH_ERROR:
-		/* it is legal to fail READA, no __drbd_chk_io_error in that case. */
+		/* it is legal to fail read-ahead, no __drbd_chk_io_error in that case. */
 		mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
 		break;
 
@@ -657,7 +657,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		break;
 
 	case QUEUE_FOR_NET_READ:
-		/* READ or READA, and
+		/* READ, and
 		 * no local disk,
 		 * or target area marked as invalid,
 		 * or just got an io-error. */
@@ -1172,7 +1172,14 @@ drbd_submit_req_private_bio(struct drbd_request *req)
 {
 	struct drbd_device *device = req->device;
 	struct bio *bio = req->private_bio;
-	const int rw = bio_rw(bio);
+	unsigned int type;
+
+	if (bio_op(bio) != REQ_OP_READ)
+		type = DRBD_FAULT_DT_WR;
+	else if (bio->bi_rw & REQ_RAHEAD)
+		type = DRBD_FAULT_DT_RA;
+	else
+		type = DRBD_FAULT_DT_RD;
 
 	bio->bi_bdev = device->ldev->backing_bdev;
 
@@ -1182,10 +1189,7 @@ drbd_submit_req_private_bio(struct drbd_request *req)
 	 * stable storage, and this is a WRITE, we may not even submit
 	 * this bio. */
 	if (get_ldev(device)) {
-		if (drbd_insert_fault(device,
-				      rw == WRITE ? DRBD_FAULT_DT_WR
-				    : rw == READ ? DRBD_FAULT_DT_RD
-				    : DRBD_FAULT_DT_RA))
+		if (drbd_insert_fault(device, type))
 			bio_io_error(bio);
 		else if (bio_op(bio) == REQ_OP_DISCARD)
 			drbd_process_discard_req(req);
@@ -1278,7 +1282,7 @@ static bool may_do_writes(struct drbd_device *device)
 static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request *req)
 {
 	struct drbd_resource *resource = device->resource;
-	const int rw = bio_rw(req->master_bio);
+	const int rw = bio_data_dir(req->master_bio);
 	struct bio_and_error m = { NULL, };
 	bool no_remote = false;
 	bool submit_private_bio = false;
@@ -1308,7 +1312,7 @@ static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request *req)
 		goto out;
 	}
 
-	/* We fail READ/READA early, if we can not serve it.
+	/* We fail READ early, if we can not serve it.
 	 * We must do this before req is registered on any lists.
 	 * Otherwise, drbd_req_complete() will queue failed READ for retry. */
 	if (rw != WRITE) {
30 changes: 19 additions & 11 deletions drivers/block/drbd/drbd_worker.c
@@ -248,18 +248,26 @@ void drbd_request_endio(struct bio *bio)
 
 	/* to avoid recursion in __req_mod */
 	if (unlikely(bio->bi_error)) {
-		if (bio_op(bio) == REQ_OP_DISCARD)
-			what = (bio->bi_error == -EOPNOTSUPP)
-				? DISCARD_COMPLETED_NOTSUPP
-				: DISCARD_COMPLETED_WITH_ERROR;
-		else
-			what = (bio_data_dir(bio) == WRITE)
-			? WRITE_COMPLETED_WITH_ERROR
-			: (bio_rw(bio) == READ)
-			? READ_COMPLETED_WITH_ERROR
-			: READ_AHEAD_COMPLETED_WITH_ERROR;
-	} else
+		switch (bio_op(bio)) {
+		case REQ_OP_DISCARD:
+			if (bio->bi_error == -EOPNOTSUPP)
+				what = DISCARD_COMPLETED_NOTSUPP;
+			else
+				what = DISCARD_COMPLETED_WITH_ERROR;
+			break;
+		case REQ_OP_READ:
+			if (bio->bi_rw & REQ_RAHEAD)
+				what = READ_AHEAD_COMPLETED_WITH_ERROR;
+			else
+				what = READ_COMPLETED_WITH_ERROR;
+			break;
+		default:
+			what = WRITE_COMPLETED_WITH_ERROR;
+			break;
+		}
+	} else {
 		what = COMPLETED_OK;
+	}
 
 	bio_put(req->private_bio);
 	req->private_bio = ERR_PTR(bio->bi_error);
2 changes: 1 addition & 1 deletion drivers/block/null_blk.c
@@ -448,7 +448,7 @@ static int null_lnvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
 	struct request *rq;
 	struct bio *bio = rqd->bio;
 
-	rq = blk_mq_alloc_request(q, bio_rw(bio), 0);
+	rq = blk_mq_alloc_request(q, bio_data_dir(bio), 0);
 	if (IS_ERR(rq))
 		return -ENOMEM;
 
6 changes: 2 additions & 4 deletions drivers/block/umem.c
@@ -344,7 +344,6 @@ static int add_bio(struct cardinfo *card)
 	int offset;
 	struct bio *bio;
 	struct bio_vec vec;
-	int rw;
 
 	bio = card->currentbio;
 	if (!bio && card->bio) {
@@ -359,7 +358,6 @@ static int add_bio(struct cardinfo *card)
 	if (!bio)
 		return 0;
 
-	rw = bio_rw(bio);
 	if (card->mm_pages[card->Ready].cnt >= DESC_PER_PAGE)
 		return 0;
 
@@ -369,7 +367,7 @@ static int add_bio(struct cardinfo *card)
 				  vec.bv_page,
 				  vec.bv_offset,
 				  vec.bv_len,
-				  (rw == READ) ?
+				  bio_op(bio) == REQ_OP_READ ?
 				  PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
 
 	p = &card->mm_pages[card->Ready];
@@ -398,7 +396,7 @@ static int add_bio(struct cardinfo *card)
 			DMASCR_CHAIN_EN |
 			DMASCR_SEM_EN |
 			pci_cmds);
-	if (rw == WRITE)
+	if (bio_op(bio) == REQ_OP_WRITE)
 		desc->control_bits |= cpu_to_le32(DMASCR_TRANSFER_READ);
 	desc->sem_control_bits = desc->control_bits;
 
4 changes: 2 additions & 2 deletions drivers/lightnvm/rrpc.c
@@ -853,14 +853,14 @@ static int rrpc_setup_rq(struct rrpc *rrpc, struct bio *bio,
 			return NVM_IO_ERR;
 		}
 
-		if (bio_rw(bio) == WRITE)
+		if (bio_op(bio) == REQ_OP_WRITE)
 			return rrpc_write_ppalist_rq(rrpc, bio, rqd, flags,
 						     npages);
 
 		return rrpc_read_ppalist_rq(rrpc, bio, rqd, flags, npages);
 	}
 
-	if (bio_rw(bio) == WRITE)
+	if (bio_op(bio) == REQ_OP_WRITE)
 		return rrpc_write_rq(rrpc, bio, rqd, flags);
 
 	return rrpc_read_rq(rrpc, bio, rqd, flags);
8 changes: 4 additions & 4 deletions drivers/md/dm-raid1.c
@@ -528,7 +528,7 @@ static void read_callback(unsigned long error, void *context)
 		DMWARN_LIMIT("Read failure on mirror device %s.  "
 			     "Trying alternative device.",
 			     m->dev->name);
-		queue_bio(m->ms, bio, bio_rw(bio));
+		queue_bio(m->ms, bio, bio_data_dir(bio));
 		return;
 	}
 
@@ -1193,7 +1193,7 @@ static void mirror_dtr(struct dm_target *ti)
  */
 static int mirror_map(struct dm_target *ti, struct bio *bio)
 {
-	int r, rw = bio_rw(bio);
+	int r, rw = bio_data_dir(bio);
 	struct mirror *m;
 	struct mirror_set *ms = ti->private;
 	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
@@ -1217,7 +1217,7 @@ static int mirror_map(struct dm_target *ti, struct bio *bio)
 	 * If region is not in-sync queue the bio.
 	 */
 	if (!r || (r == -EWOULDBLOCK)) {
-		if (rw == READA)
+		if (bio->bi_rw & REQ_RAHEAD)
 			return -EWOULDBLOCK;
 
 		queue_bio(ms, bio, rw);
@@ -1242,7 +1242,7 @@ static int mirror_map(struct dm_target *ti, struct bio *bio)
 
 static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
 {
-	int rw = bio_rw(bio);
+	int rw = bio_data_dir(bio);
 	struct mirror_set *ms = (struct mirror_set *) ti->private;
 	struct mirror *m = NULL;
 	struct dm_bio_details *bd = NULL;
13 changes: 7 additions & 6 deletions drivers/md/dm-snap.c
@@ -1696,7 +1696,8 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
 	 * to copy an exception */
 	down_write(&s->lock);
 
-	if (!s->valid || (unlikely(s->snapshot_overflowed) && bio_rw(bio) == WRITE)) {
+	if (!s->valid || (unlikely(s->snapshot_overflowed) &&
+	    bio_data_dir(bio) == WRITE)) {
 		r = -EIO;
 		goto out_unlock;
 	}
@@ -1713,7 +1714,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
 	 * flags so we should only get this if we are
 	 * writeable.
 	 */
-	if (bio_rw(bio) == WRITE) {
+	if (bio_data_dir(bio) == WRITE) {
 		pe = __lookup_pending_exception(s, chunk);
 		if (!pe) {
 			up_write(&s->lock);
@@ -1819,7 +1820,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
 	e = dm_lookup_exception(&s->complete, chunk);
 	if (e) {
 		/* Queue writes overlapping with chunks being merged */
-		if (bio_rw(bio) == WRITE &&
+		if (bio_data_dir(bio) == WRITE &&
 		    chunk >= s->first_merging_chunk &&
 		    chunk < (s->first_merging_chunk +
 			     s->num_merging_chunks)) {
@@ -1831,15 +1832,15 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
 
 		remap_exception(s, e, bio, chunk);
 
-		if (bio_rw(bio) == WRITE)
+		if (bio_data_dir(bio) == WRITE)
 			track_chunk(s, bio, chunk);
 		goto out_unlock;
 	}
 
 redirect_to_origin:
 	bio->bi_bdev = s->origin->bdev;
 
-	if (bio_rw(bio) == WRITE) {
+	if (bio_data_dir(bio) == WRITE) {
 		up_write(&s->lock);
 		return do_origin(s->origin, bio);
 	}
@@ -2288,7 +2289,7 @@ static int origin_map(struct dm_target *ti, struct bio *bio)
 	if (unlikely(bio->bi_rw & REQ_PREFLUSH))
 		return DM_MAPIO_REMAPPED;
 
-	if (bio_rw(bio) != WRITE)
+	if (bio_data_dir(bio) != WRITE)
 		return DM_MAPIO_REMAPPED;
 
 	available_sectors = o->split_boundary -
15 changes: 9 additions & 6 deletions drivers/md/dm-zero.c
@@ -35,16 +35,19 @@ static int zero_ctr(struct dm_target *ti, unsigned int argc, char **argv)
  */
 static int zero_map(struct dm_target *ti, struct bio *bio)
 {
-	switch(bio_rw(bio)) {
-	case READ:
+	switch (bio_op(bio)) {
+	case REQ_OP_READ:
+		if (bio->bi_rw & REQ_RAHEAD) {
+			/* readahead of null bytes only wastes buffer cache */
+			return -EIO;
+		}
 		zero_fill_bio(bio);
 		break;
-	case READA:
-		/* readahead of null bytes only wastes buffer cache */
-		return -EIO;
-	case WRITE:
+	case REQ_OP_WRITE:
 		/* writes get silently dropped */
 		break;
+	default:
+		return -EIO;
 	}
 
 	bio_endio(bio);
2 changes: 1 addition & 1 deletion drivers/md/dm.c
@@ -1833,7 +1833,7 @@ static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
 	if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
 		dm_put_live_table(md, srcu_idx);
 
-		if (bio_rw(bio) != READA)
+		if (!(bio->bi_rw & REQ_RAHEAD))
 			queue_io(md, bio);
 		else
 			bio_io_error(bio);
2 changes: 1 addition & 1 deletion drivers/md/raid1.c
@@ -1105,7 +1105,7 @@ static void raid1_make_request(struct mddev *mddev, struct bio * bio)
 	bitmap = mddev->bitmap;
 
 	/*
-	 * make_request() can abort the operation when READA is being
+	 * make_request() can abort the operation when read-ahead is being
 	 * used and no empty request is available.
 	 *
 	 */
2 changes: 1 addition & 1 deletion drivers/md/raid5.c
@@ -5233,7 +5233,7 @@ static void raid5_make_request(struct mddev *mddev, struct bio * bi)
 			(unsigned long long)logical_sector);
 
 		sh = raid5_get_active_stripe(conf, new_sector, previous,
-				       (bi->bi_rw&RWA_MASK), 0);
+				       (bi->bi_rw & REQ_RAHEAD), 0);
 		if (sh) {
 			if (unlikely(previous)) {
 				/* expansion might have moved on while waiting for a
2 changes: 1 addition & 1 deletion drivers/nvme/host/lightnvm.c
@@ -500,7 +500,7 @@ static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
 	struct bio *bio = rqd->bio;
 	struct nvme_nvm_command *cmd;
 
-	rq = blk_mq_alloc_request(q, bio_rw(bio), 0);
+	rq = blk_mq_alloc_request(q, bio_data_dir(bio), 0);
 	if (IS_ERR(rq))
 		return -ENOMEM;
 
13 changes: 7 additions & 6 deletions drivers/staging/lustre/lustre/llite/lloop.c
@@ -336,7 +336,6 @@ static unsigned int loop_get_bio(struct lloop_device *lo, struct bio **req)
 static blk_qc_t loop_make_request(struct request_queue *q, struct bio *old_bio)
 {
 	struct lloop_device *lo = q->queuedata;
-	int rw = bio_rw(old_bio);
 	int inactive;
 
 	blk_queue_split(q, &old_bio, q->bio_split);
@@ -354,13 +353,15 @@ static blk_qc_t loop_make_request(struct request_queue *q, struct bio *old_bio)
 	if (inactive)
 		goto err;
 
-	if (rw == WRITE) {
+	switch (bio_op(old_bio)) {
+	case REQ_OP_WRITE:
 		if (lo->lo_flags & LO_FLAGS_READ_ONLY)
 			goto err;
-	} else if (rw == READA) {
-		rw = READ;
-	} else if (rw != READ) {
-		CERROR("lloop: unknown command (%x)\n", rw);
+		break;
+	case REQ_OP_READ:
+		break;
+	default:
+		CERROR("lloop: unknown command (%x)\n", bio_op(old_bio));
 		goto err;
 	}
 	loop_add_bio(lo, old_bio);
[diff truncated: the remaining 12 of 26 changed files are not shown]
