blk-mq: abstract out queue map
This is in preparation for allowing multiple sets of maps per
queue, if so desired.

Reviewed-by: Hannes Reinecke <[email protected]>
Reviewed-by: Bart Van Assche <[email protected]>
Reviewed-by: Keith Busch <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
axboe committed Nov 7, 2018
1 parent a890893 commit ed76e32
Showing 15 changed files with 64 additions and 50 deletions.
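The gist of the change, before the per-file diffs: the CPU-to-queue table (mq_map) and its length move out of struct blk_mq_tag_set into a new struct blk_mq_queue_map, and the mapping helpers take a pointer to one such map instead of the whole tag set. A minimal sketch of the new calling convention, assuming a hypothetical driver callback named example_map_queues() (not code from this patch):

/* Sketch only: a driver's .map_queues callback under the new API. */
static int example_map_queues(struct blk_mq_tag_set *set)
{
        /* Only one map exists so far, so everything goes through map[0]. */
        return blk_mq_map_queues(&set->map[0]);
}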
10 changes: 5 additions & 5 deletions block/blk-mq-cpumap.c
@@ -30,10 +30,10 @@ static int get_first_sibling(unsigned int cpu)
         return cpu;
 }
 
-int blk_mq_map_queues(struct blk_mq_tag_set *set)
+int blk_mq_map_queues(struct blk_mq_queue_map *qmap)
 {
-        unsigned int *map = set->mq_map;
-        unsigned int nr_queues = set->nr_hw_queues;
+        unsigned int *map = qmap->mq_map;
+        unsigned int nr_queues = qmap->nr_queues;
         unsigned int cpu, first_sibling;
 
         for_each_possible_cpu(cpu) {
@@ -62,12 +62,12 @@ EXPORT_SYMBOL_GPL(blk_mq_map_queues);
  * We have no quick way of doing reverse lookups. This is only used at
  * queue init time, so runtime isn't important.
  */
-int blk_mq_hw_queue_to_node(unsigned int *mq_map, unsigned int index)
+int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int index)
 {
         int i;
 
         for_each_possible_cpu(i) {
-                if (index == mq_map[i])
+                if (index == qmap->mq_map[i])
                         return local_memory_node(cpu_to_node(i));
         }
 
10 changes: 5 additions & 5 deletions block/blk-mq-pci.c
@@ -31,26 +31,26 @@
  * that maps a queue to the CPUs that have irq affinity for the corresponding
  * vector.
  */
-int blk_mq_pci_map_queues(struct blk_mq_tag_set *set, struct pci_dev *pdev,
+int blk_mq_pci_map_queues(struct blk_mq_queue_map *qmap, struct pci_dev *pdev,
                           int offset)
 {
         const struct cpumask *mask;
         unsigned int queue, cpu;
 
-        for (queue = 0; queue < set->nr_hw_queues; queue++) {
+        for (queue = 0; queue < qmap->nr_queues; queue++) {
                 mask = pci_irq_get_affinity(pdev, queue + offset);
                 if (!mask)
                         goto fallback;
 
                 for_each_cpu(cpu, mask)
-                        set->mq_map[cpu] = queue;
+                        qmap->mq_map[cpu] = queue;
         }
 
         return 0;
 
 fallback:
-        WARN_ON_ONCE(set->nr_hw_queues > 1);
-        blk_mq_clear_mq_map(set);
+        WARN_ON_ONCE(qmap->nr_queues > 1);
+        blk_mq_clear_mq_map(qmap);
         return 0;
 }
 EXPORT_SYMBOL_GPL(blk_mq_pci_map_queues);
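To make the table concrete: on a hypothetical machine with 4 CPUs and a PCI device exposing two queues, whose irq affinity masks are {0,1} and {2,3}, the loop above would leave the map in the following state (illustrative values, not taken from the patch):

/*
 * Hypothetical result for 4 CPUs, 2 queues:
 *
 *     qmap->nr_queues == 2
 *     qmap->mq_map[0] == 0        CPU 0 -> hw queue 0
 *     qmap->mq_map[1] == 0        CPU 1 -> hw queue 0
 *     qmap->mq_map[2] == 1        CPU 2 -> hw queue 1
 *     qmap->mq_map[3] == 1        CPU 3 -> hw queue 1
 */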
4 changes: 2 additions & 2 deletions block/blk-mq-rdma.c
@@ -41,12 +41,12 @@ int blk_mq_rdma_map_queues(struct blk_mq_tag_set *set,
                         goto fallback;
 
                 for_each_cpu(cpu, mask)
-                        set->mq_map[cpu] = queue;
+                        set->map[0].mq_map[cpu] = queue;
         }
 
         return 0;
 
 fallback:
-        return blk_mq_map_queues(set);
+        return blk_mq_map_queues(&set->map[0]);
 }
 EXPORT_SYMBOL_GPL(blk_mq_rdma_map_queues);
8 changes: 4 additions & 4 deletions block/blk-mq-virtio.c
@@ -29,7 +29,7 @@
  * that maps a queue to the CPUs that have irq affinity for the corresponding
  * vector.
  */
-int blk_mq_virtio_map_queues(struct blk_mq_tag_set *set,
+int blk_mq_virtio_map_queues(struct blk_mq_queue_map *qmap,
                 struct virtio_device *vdev, int first_vec)
 {
         const struct cpumask *mask;
@@ -38,17 +38,17 @@ int blk_mq_virtio_map_queues(struct blk_mq_tag_set *set,
         if (!vdev->config->get_vq_affinity)
                 goto fallback;
 
-        for (queue = 0; queue < set->nr_hw_queues; queue++) {
+        for (queue = 0; queue < qmap->nr_queues; queue++) {
                 mask = vdev->config->get_vq_affinity(vdev, first_vec + queue);
                 if (!mask)
                         goto fallback;
 
                 for_each_cpu(cpu, mask)
-                        set->mq_map[cpu] = queue;
+                        qmap->mq_map[cpu] = queue;
         }
 
         return 0;
 fallback:
-        return blk_mq_map_queues(set);
+        return blk_mq_map_queues(qmap);
 }
 EXPORT_SYMBOL_GPL(blk_mq_virtio_map_queues);
34 changes: 18 additions & 16 deletions block/blk-mq.c
@@ -1975,7 +1975,7 @@ struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
         struct blk_mq_tags *tags;
         int node;
 
-        node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx);
+        node = blk_mq_hw_queue_to_node(&set->map[0], hctx_idx);
         if (node == NUMA_NO_NODE)
                 node = set->numa_node;
 
@@ -2031,7 +2031,7 @@ int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
         size_t rq_size, left;
         int node;
 
-        node = blk_mq_hw_queue_to_node(set->mq_map, hctx_idx);
+        node = blk_mq_hw_queue_to_node(&set->map[0], hctx_idx);
         if (node == NUMA_NO_NODE)
                 node = set->numa_node;
 
@@ -2322,7 +2322,7 @@ static void blk_mq_map_swqueue(struct request_queue *q)
          * If the cpu isn't present, the cpu is mapped to first hctx.
          */
         for_each_possible_cpu(i) {
-                hctx_idx = set->mq_map[i];
+                hctx_idx = set->map[0].mq_map[i];
                 /* unmapped hw queue can be remapped after CPU topo changed */
                 if (!set->tags[hctx_idx] &&
                     !__blk_mq_alloc_rq_map(set, hctx_idx)) {
@@ -2332,7 +2332,7 @@ static void blk_mq_map_swqueue(struct request_queue *q)
                          * case, remap the current ctx to hctx[0] which
                          * is guaranteed to always have tags allocated
                          */
-                        set->mq_map[i] = 0;
+                        set->map[0].mq_map[i] = 0;
                 }
 
                 ctx = per_cpu_ptr(q->queue_ctx, i);
@@ -2585,7 +2585,7 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
                 int node;
                 struct blk_mq_hw_ctx *hctx;
 
-                node = blk_mq_hw_queue_to_node(set->mq_map, i);
+                node = blk_mq_hw_queue_to_node(&set->map[0], i);
                 /*
                  * If the hw queue has been mapped to another numa node,
                  * we need to realloc the hctx. If allocation fails, fallback
@@ -2791,18 +2791,18 @@ static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
                  * for (queue = 0; queue < set->nr_hw_queues; queue++) {
                  *         mask = get_cpu_mask(queue)
                  *         for_each_cpu(cpu, mask)
-                 *                 set->mq_map[cpu] = queue;
+                 *                 set->map.mq_map[cpu] = queue;
                  * }
                  *
                  * When we need to remap, the table has to be cleared for
                  * killing stale mapping since one CPU may not be mapped
                  * to any hw queue.
                  */
-                blk_mq_clear_mq_map(set);
+                blk_mq_clear_mq_map(&set->map[0]);
 
                 return set->ops->map_queues(set);
         } else
-                return blk_mq_map_queues(set);
+                return blk_mq_map_queues(&set->map[0]);
 }
 
 /*
@@ -2857,10 +2857,12 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
                 return -ENOMEM;
 
         ret = -ENOMEM;
-        set->mq_map = kcalloc_node(nr_cpu_ids, sizeof(*set->mq_map),
-                                   GFP_KERNEL, set->numa_node);
-        if (!set->mq_map)
+        set->map[0].mq_map = kcalloc_node(nr_cpu_ids,
+                                          sizeof(*set->map[0].mq_map),
+                                          GFP_KERNEL, set->numa_node);
+        if (!set->map[0].mq_map)
                 goto out_free_tags;
+        set->map[0].nr_queues = set->nr_hw_queues;
 
         ret = blk_mq_update_queue_map(set);
         if (ret)
@@ -2876,8 +2878,8 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
         return 0;
 
 out_free_mq_map:
-        kfree(set->mq_map);
-        set->mq_map = NULL;
+        kfree(set->map[0].mq_map);
+        set->map[0].mq_map = NULL;
 out_free_tags:
         kfree(set->tags);
         set->tags = NULL;
@@ -2892,8 +2894,8 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
         for (i = 0; i < nr_cpu_ids; i++)
                 blk_mq_free_map_and_requests(set, i);
 
-        kfree(set->mq_map);
-        set->mq_map = NULL;
+        kfree(set->map[0].mq_map);
+        set->map[0].mq_map = NULL;
 
         kfree(set->tags);
         set->tags = NULL;
@@ -3054,7 +3056,7 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
                         pr_warn("Increasing nr_hw_queues to %d fails, fallback to %d\n",
                                         nr_hw_queues, prev_nr_hw_queues);
                         set->nr_hw_queues = prev_nr_hw_queues;
-                        blk_mq_map_queues(set);
+                        blk_mq_map_queues(&set->map[0]);
                         goto fallback;
                 }
                 blk_mq_map_swqueue(q);
8 changes: 4 additions & 4 deletions block/blk-mq.h
@@ -70,14 +70,14 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
 /*
  * CPU -> queue mappings
  */
-extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);
+extern int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int);
 
 static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
                                                      int cpu)
 {
         struct blk_mq_tag_set *set = q->tag_set;
 
-        return q->queue_hw_ctx[set->mq_map[cpu]];
+        return q->queue_hw_ctx[set->map[0].mq_map[cpu]];
 }
 
 /*
@@ -206,12 +206,12 @@ static inline void blk_mq_put_driver_tag(struct request *rq)
         __blk_mq_put_driver_tag(hctx, rq);
 }
 
-static inline void blk_mq_clear_mq_map(struct blk_mq_tag_set *set)
+static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
 {
         int cpu;
 
         for_each_possible_cpu(cpu)
-                set->mq_map[cpu] = 0;
+                qmap->mq_map[cpu] = 0;
 }
 
 #endif
2 changes: 1 addition & 1 deletion drivers/block/virtio_blk.c
@@ -624,7 +624,7 @@ static int virtblk_map_queues(struct blk_mq_tag_set *set)
 {
         struct virtio_blk *vblk = set->driver_data;
 
-        return blk_mq_virtio_map_queues(set, vblk->vdev, 0);
+        return blk_mq_virtio_map_queues(&set->map[0], vblk->vdev, 0);
 }
 
 #ifdef CONFIG_VIRTIO_BLK_SCSI
2 changes: 1 addition & 1 deletion drivers/nvme/host/pci.c
@@ -435,7 +435,7 @@ static int nvme_pci_map_queues(struct blk_mq_tag_set *set)
 {
         struct nvme_dev *dev = set->driver_data;
 
-        return blk_mq_pci_map_queues(set, to_pci_dev(dev->dev),
+        return blk_mq_pci_map_queues(&set->map[0], to_pci_dev(dev->dev),
                         dev->num_vecs > 1 ? 1 /* admin queue */ : 0);
 }
 
5 changes: 3 additions & 2 deletions drivers/scsi/qla2xxx/qla_os.c
@@ -6934,11 +6934,12 @@ static int qla2xxx_map_queues(struct Scsi_Host *shost)
 {
         int rc;
         scsi_qla_host_t *vha = (scsi_qla_host_t *)shost->hostdata;
+        struct blk_mq_queue_map *qmap = &shost->tag_set.map[0];
 
         if (USER_CTRL_IRQ(vha->hw))
-                rc = blk_mq_map_queues(&shost->tag_set);
+                rc = blk_mq_map_queues(qmap);
         else
-                rc = blk_mq_pci_map_queues(&shost->tag_set, vha->hw->pdev, 0);
+                rc = blk_mq_pci_map_queues(qmap, vha->hw->pdev, 0);
         return rc;
 }
 
2 changes: 1 addition & 1 deletion drivers/scsi/scsi_lib.c
@@ -1812,7 +1812,7 @@ static int scsi_map_queues(struct blk_mq_tag_set *set)
 
         if (shost->hostt->map_queues)
                 return shost->hostt->map_queues(shost);
-        return blk_mq_map_queues(set);
+        return blk_mq_map_queues(&set->map[0]);
 }
 
 void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
3 changes: 2 additions & 1 deletion drivers/scsi/smartpqi/smartpqi_init.c
@@ -5319,7 +5319,8 @@ static int pqi_map_queues(struct Scsi_Host *shost)
 {
         struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
 
-        return blk_mq_pci_map_queues(&shost->tag_set, ctrl_info->pci_dev, 0);
+        return blk_mq_pci_map_queues(&shost->tag_set.map[0],
+                                        ctrl_info->pci_dev, 0);
 }
 
 static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info,
3 changes: 2 additions & 1 deletion drivers/scsi/virtio_scsi.c
@@ -719,8 +719,9 @@ static void virtscsi_target_destroy(struct scsi_target *starget)
 static int virtscsi_map_queues(struct Scsi_Host *shost)
 {
         struct virtio_scsi *vscsi = shost_priv(shost);
+        struct blk_mq_queue_map *qmap = &shost->tag_set.map[0];
 
-        return blk_mq_virtio_map_queues(&shost->tag_set, vscsi->vdev, 2);
+        return blk_mq_virtio_map_queues(qmap, vscsi->vdev, 2);
 }
 
 /*
4 changes: 2 additions & 2 deletions include/linux/blk-mq-pci.h
@@ -2,10 +2,10 @@
 #ifndef _LINUX_BLK_MQ_PCI_H
 #define _LINUX_BLK_MQ_PCI_H
 
-struct blk_mq_tag_set;
+struct blk_mq_queue_map;
 struct pci_dev;
 
-int blk_mq_pci_map_queues(struct blk_mq_tag_set *set, struct pci_dev *pdev,
+int blk_mq_pci_map_queues(struct blk_mq_queue_map *qmap, struct pci_dev *pdev,
                           int offset);
 
 #endif /* _LINUX_BLK_MQ_PCI_H */
4 changes: 2 additions & 2 deletions include/linux/blk-mq-virtio.h
@@ -2,10 +2,10 @@
 #ifndef _LINUX_BLK_MQ_VIRTIO_H
 #define _LINUX_BLK_MQ_VIRTIO_H
 
-struct blk_mq_tag_set;
+struct blk_mq_queue_map;
 struct virtio_device;
 
-int blk_mq_virtio_map_queues(struct blk_mq_tag_set *set,
+int blk_mq_virtio_map_queues(struct blk_mq_queue_map *qmap,
                 struct virtio_device *vdev, int first_vec);
 
 #endif /* _LINUX_BLK_MQ_VIRTIO_H */
15 changes: 12 additions & 3 deletions include/linux/blk-mq.h
@@ -74,10 +74,19 @@ struct blk_mq_hw_ctx {
         struct srcu_struct      srcu[0];
 };
 
+struct blk_mq_queue_map {
+        unsigned int *mq_map;
+        unsigned int nr_queues;
+};
+
+enum {
+        HCTX_MAX_TYPES = 1,
+};
+
 struct blk_mq_tag_set {
-        unsigned int            *mq_map;
+        struct blk_mq_queue_map map[HCTX_MAX_TYPES];
         const struct blk_mq_ops *ops;
-        unsigned int            nr_hw_queues;
+        unsigned int            nr_hw_queues;   /* nr hw queues across maps */
         unsigned int            queue_depth;    /* max hw supported */
         unsigned int            reserved_tags;
         unsigned int            cmd_size;       /* per-request extra data */
@@ -295,7 +304,7 @@ void blk_mq_freeze_queue_wait(struct request_queue *q);
 int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
                                      unsigned long timeout);
 
-int blk_mq_map_queues(struct blk_mq_tag_set *set);
+int blk_mq_map_queues(struct blk_mq_queue_map *qmap);
 void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);
 
 void blk_mq_quiesce_queue_nowait(struct request_queue *q);
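With HCTX_MAX_TYPES fixed at 1, map[0] is the only slot and behavior is unchanged; the array exists so later patches can register additional map types per queue. A sketch of where this is headed, assuming the enum later grows; the HCTX_TYPE_* names and the helper below are illustrative, not part of this commit:

/* Illustrative only: this commit defines HCTX_MAX_TYPES = 1. */
enum {
        HCTX_TYPE_DEFAULT,      /* e.g. ordinary reads and writes */
        HCTX_TYPE_POLL,         /* e.g. polled I/O */

        HCTX_MAX_TYPES,
};

/* Each type would then carry its own CPU -> queue table: */
static inline unsigned int example_queue_for(struct blk_mq_tag_set *set,
                                             unsigned int type, int cpu)
{
        return set->map[type].mq_map[cpu];
}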
