vhost_scsi: make SCSI cmd completion per vq
This patch separates the SCSI cmd completion code paths so we can complete
cmds based on their vq instead of having all cmds complete on the same
worker/CPU. This will be useful with the next patches, which allow us to
create multiple worker threads and bind them to different vqs, so that
completions can run on different threads/CPUs.

Signed-off-by: Mike Christie <[email protected]>
Reviewed-by: Stefan Hajnoczi <[email protected]>
Message-Id: <[email protected]>
Signed-off-by: Michael S. Tsirkin <[email protected]>
mikechristie authored and mstsirkin committed Jul 3, 2023
1 parent 9e09d0e commit 48ae70d
Showing 1 changed file with 26 additions and 30 deletions.
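In outline, the patch gives each vhost_scsi_virtqueue its own completion llist and vhost_work: command release queues the cmd on its own vq's list and kicks that vq's work item, and the completion work drains and signals only that vq. Below is a condensed sketch of that pattern assembled from the pieces visible in this diff; it assumes the driver's internal headers, and the helper names queue_cmd_completion() and complete_one_vq() are illustrative, not functions in the file.

/* Sketch only: assumes drivers/vhost/vhost.h and the vhost-scsi types. */

/* Per-vq completion state added by this patch. */
struct vhost_scsi_virtqueue {
	struct vhost_virtqueue vq;
	struct vhost_scsi *vs;			/* back-pointer used for vhost_signal() */
	struct vhost_work completion_work;	/* runs the per-vq completion handler */
	struct llist_head completion_list;	/* cmds completed by the target core */
	/* ... existing fields ... */
};

/* Release path (illustrative name): queue the cmd on its own vq
 * instead of a global per-device list.
 */
static void queue_cmd_completion(struct vhost_scsi_cmd *cmd)
{
	struct vhost_scsi_virtqueue *svq = container_of(cmd->tvc_vq,
					struct vhost_scsi_virtqueue, vq);

	llist_add(&cmd->tvc_completion_list, &svq->completion_list);
	vhost_vq_work_queue(&svq->vq, &svq->completion_work);
}

/* Completion work (illustrative name): drain only this vq's list and
 * signal only this vq, so no per-device bitmap is needed.
 */
static void complete_one_vq(struct vhost_scsi_virtqueue *svq)
{
	struct llist_node *llnode = llist_del_all(&svq->completion_list);
	bool signal = false;

	/* ... for each cmd: copy the response, vhost_add_used(), signal = true ... */

	if (signal)
		vhost_signal(&svq->vs->dev, &svq->vq);
}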
56 changes: 26 additions & 30 deletions drivers/vhost/scsi.c
@@ -167,6 +167,7 @@ MODULE_PARM_DESC(max_io_vqs, "Set the max number of IO virtqueues a vhost scsi d
 
 struct vhost_scsi_virtqueue {
 	struct vhost_virtqueue vq;
+	struct vhost_scsi *vs;
 	/*
 	 * Reference counting for inflight reqs, used for flush operation. At
 	 * each time, one reference tracks new commands submitted, while we
@@ -181,6 +182,9 @@ struct vhost_scsi_virtqueue {
 	struct vhost_scsi_cmd *scsi_cmds;
 	struct sbitmap scsi_tags;
 	int max_cmds;
+
+	struct vhost_work completion_work;
+	struct llist_head completion_list;
 };
 
 struct vhost_scsi {
@@ -190,12 +194,8 @@ struct vhost_scsi {
 
 	struct vhost_dev dev;
 	struct vhost_scsi_virtqueue *vqs;
-	unsigned long *compl_bitmap;
 	struct vhost_scsi_inflight **old_inflight;
 
-	struct vhost_work vs_completion_work; /* cmd completion work item */
-	struct llist_head vs_completion_list; /* cmd completion queue */
-
 	struct vhost_work vs_event_work; /* evt injection work item */
 	struct llist_head vs_event_list; /* evt injection queue */
 
@@ -358,10 +358,11 @@ static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
 	} else {
 		struct vhost_scsi_cmd *cmd = container_of(se_cmd,
					struct vhost_scsi_cmd, tvc_se_cmd);
-		struct vhost_scsi *vs = cmd->tvc_vhost;
+		struct vhost_scsi_virtqueue *svq = container_of(cmd->tvc_vq,
+					struct vhost_scsi_virtqueue, vq);
 
-		llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list);
-		vhost_work_queue(&vs->dev, &vs->vs_completion_work);
+		llist_add(&cmd->tvc_completion_list, &svq->completion_list);
+		vhost_vq_work_queue(&svq->vq, &svq->completion_work);
 	}
 }
 
@@ -509,17 +510,17 @@ static void vhost_scsi_evt_work(struct vhost_work *work)
  */
 static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
 {
-	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
-					vs_completion_work);
+	struct vhost_scsi_virtqueue *svq = container_of(work,
+				struct vhost_scsi_virtqueue, completion_work);
 	struct virtio_scsi_cmd_resp v_rsp;
 	struct vhost_scsi_cmd *cmd, *t;
 	struct llist_node *llnode;
 	struct se_cmd *se_cmd;
 	struct iov_iter iov_iter;
-	int ret, vq;
+	bool signal = false;
+	int ret;
 
-	bitmap_zero(vs->compl_bitmap, vs->dev.nvqs);
-	llnode = llist_del_all(&vs->vs_completion_list);
+	llnode = llist_del_all(&svq->completion_list);
 	llist_for_each_entry_safe(cmd, t, llnode, tvc_completion_list) {
 		se_cmd = &cmd->tvc_se_cmd;
 
@@ -539,21 +540,17 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
			      cmd->tvc_in_iovs, sizeof(v_rsp));
 		ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter);
 		if (likely(ret == sizeof(v_rsp))) {
-			struct vhost_scsi_virtqueue *q;
+			signal = true;
+
 			vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
-			q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
-			vq = q - vs->vqs;
-			__set_bit(vq, vs->compl_bitmap);
 		} else
 			pr_err("Faulted on virtio_scsi_cmd_resp\n");
 
 		vhost_scsi_release_cmd_res(se_cmd);
 	}
 
-	vq = -1;
-	while ((vq = find_next_bit(vs->compl_bitmap, vs->dev.nvqs, vq + 1))
-		< vs->dev.nvqs)
-		vhost_signal(&vs->dev, &vs->vqs[vq].vq);
+	if (signal)
+		vhost_signal(&svq->vs->dev, &svq->vq);
 }
 
 static struct vhost_scsi_cmd *
@@ -1770,6 +1767,7 @@ static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
 
 static int vhost_scsi_open(struct inode *inode, struct file *f)
 {
+	struct vhost_scsi_virtqueue *svq;
 	struct vhost_scsi *vs;
 	struct vhost_virtqueue **vqs;
 	int r = -ENOMEM, i, nvqs = vhost_scsi_max_io_vqs;
@@ -1788,10 +1786,6 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
 	}
 	nvqs += VHOST_SCSI_VQ_IO;
 
-	vs->compl_bitmap = bitmap_alloc(nvqs, GFP_KERNEL);
-	if (!vs->compl_bitmap)
-		goto err_compl_bitmap;
-
 	vs->old_inflight = kmalloc_array(nvqs, sizeof(*vs->old_inflight),
					 GFP_KERNEL | __GFP_ZERO);
 	if (!vs->old_inflight)
@@ -1806,7 +1800,6 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
 	if (!vqs)
 		goto err_local_vqs;
 
-	vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work);
 	vhost_work_init(&vs->vs_event_work, vhost_scsi_evt_work);
 
 	vs->vs_events_nr = 0;
@@ -1817,8 +1810,14 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
 	vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
 	vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
 	for (i = VHOST_SCSI_VQ_IO; i < nvqs; i++) {
-		vqs[i] = &vs->vqs[i].vq;
-		vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
+		svq = &vs->vqs[i];
+
+		vqs[i] = &svq->vq;
+		svq->vs = vs;
+		init_llist_head(&svq->completion_list);
+		vhost_work_init(&svq->completion_work,
+				vhost_scsi_complete_cmd_work);
+		svq->vq.handle_kick = vhost_scsi_handle_kick;
 	}
 	vhost_dev_init(&vs->dev, vqs, nvqs, UIO_MAXIOV,
		       VHOST_SCSI_WEIGHT, 0, true, NULL);
@@ -1833,8 +1832,6 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
 err_vqs:
 	kfree(vs->old_inflight);
 err_inflight:
-	bitmap_free(vs->compl_bitmap);
-err_compl_bitmap:
 	kvfree(vs);
 err_vs:
 	return r;
@@ -1854,7 +1851,6 @@ static int vhost_scsi_release(struct inode *inode, struct file *f)
 	kfree(vs->dev.vqs);
 	kfree(vs->vqs);
 	kfree(vs->old_inflight);
-	bitmap_free(vs->compl_bitmap);
 	kvfree(vs);
 	return 0;
 }