virtio: move queue_index and num_free fields into core struct virtqueue.
They're generic concepts, so hoist them.  This also removes the need for
accessor functions (though they are kept around for the merge with DaveM's net tree).

This goes even further than Jason Wang's 17bb6d4 patch
("virtio-ring: move queue_index to vring_virtqueue"), which moved
queue_index out of the transport-specific structures.

Acked-by: Michael S. Tsirkin <[email protected]>
Signed-off-by: Rusty Russell <[email protected]>
rustyrussell committed Dec 18, 2012
1 parent 1ce6853 commit 06ca287
Showing 4 changed files with 28 additions and 30 deletions.
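
As context for the "kept around for merge with DaveM's net tree" remark: a multiqueue network driver arriving through the net tree still calls the old accessor to map a virtqueue back to its queue pair. A hedged sketch of that kind of caller (the helper name and the index-to-queue mapping here are illustrative, not taken from this commit):

#include <linux/virtio.h>

/* Illustrative only: written against the obsolete accessor so it would
 * still compile from the net tree; once the trees merge it can switch to
 * vq->index directly. */
static unsigned int example_vq2txq(struct virtqueue *vq)
{
	/* Hypothetical layout rx0, tx0, rx1, tx1, ...: a tx queue's
	 * number would be (index - 1) / 2. */
	return (virtqueue_get_queue_index(vq) - 1) / 2;
}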
4 changes: 2 additions & 2 deletions drivers/virtio/virtio_mmio.c
@@ -225,7 +225,7 @@ static void vm_notify(struct virtqueue *vq)

/* We write the queue's selector into the notification register to
* signal the other end */
-writel(virtqueue_get_queue_index(vq), vm_dev->base + VIRTIO_MMIO_QUEUE_NOTIFY);
+writel(vq->index, vm_dev->base + VIRTIO_MMIO_QUEUE_NOTIFY);
}

/* Notify all virtqueues on an interrupt. */
@@ -266,7 +266,7 @@ static void vm_del_vq(struct virtqueue *vq)
struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);
struct virtio_mmio_vq_info *info = vq->priv;
unsigned long flags, size;
-unsigned int index = virtqueue_get_queue_index(vq);
+unsigned int index = vq->index;

spin_lock_irqsave(&vm_dev->lock, flags);
list_del(&info->node);
6 changes: 2 additions & 4 deletions drivers/virtio/virtio_pci.c
@@ -203,8 +203,7 @@ static void vp_notify(struct virtqueue *vq)

/* we write the queue's selector into the notification register to
* signal the other end */
-iowrite16(virtqueue_get_queue_index(vq),
-vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY);
+iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY);
}

/* Handle a configuration change: Tell driver if it wants to know. */
@@ -479,8 +478,7 @@ static void vp_del_vq(struct virtqueue *vq)
list_del(&info->node);
spin_unlock_irqrestore(&vp_dev->lock, flags);

-iowrite16(virtqueue_get_queue_index(vq),
-vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
+iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);

if (vp_dev->msix_enabled) {
iowrite16(VIRTIO_MSI_NO_VECTOR,
34 changes: 11 additions & 23 deletions drivers/virtio/virtio_ring.c
@@ -93,8 +93,6 @@ struct vring_virtqueue
/* Host publishes avail event idx */
bool event;

-/* Number of free buffers */
-unsigned int num_free;
/* Head of free buffer list. */
unsigned int free_head;
/* Number we've added since last sync. */
@@ -106,9 +104,6 @@ struct vring_virtqueue
/* How to notify other side. FIXME: commonalize hcalls! */
void (*notify)(struct virtqueue *vq);

-/* Index of the queue */
-int queue_index;
-
#ifdef DEBUG
/* They're supposed to lock for us. */
unsigned int in_use;
@@ -167,7 +162,7 @@ static int vring_add_indirect(struct vring_virtqueue *vq,
desc[i-1].next = 0;

/* We're about to use a buffer */
-vq->num_free--;
+vq->vq.num_free--;

/* Use a single buffer which doesn't continue */
head = vq->free_head;
@@ -181,13 +176,6 @@ static int vring_add_indirect(struct vring_virtqueue *vq,
return head;
}

-int virtqueue_get_queue_index(struct virtqueue *_vq)
-{
-struct vring_virtqueue *vq = to_vvq(_vq);
-return vq->queue_index;
-}
-EXPORT_SYMBOL_GPL(virtqueue_get_queue_index);
-
/**
* virtqueue_add_buf - expose buffer to other end
* @vq: the struct virtqueue we're talking about.
@@ -235,7 +223,7 @@ int virtqueue_add_buf(struct virtqueue *_vq,

/* If the host supports indirect descriptor tables, and we have multiple
* buffers, then go indirect. FIXME: tune this threshold */
-if (vq->indirect && (out + in) > 1 && vq->num_free) {
+if (vq->indirect && (out + in) > 1 && vq->vq.num_free) {
head = vring_add_indirect(vq, sg, out, in, gfp);
if (likely(head >= 0))
goto add_head;
@@ -244,9 +232,9 @@ int virtqueue_add_buf(struct virtqueue *_vq,
BUG_ON(out + in > vq->vring.num);
BUG_ON(out + in == 0);

-if (vq->num_free < out + in) {
+if (vq->vq.num_free < out + in) {
pr_debug("Can't add buf len %i - avail = %i\n",
-out + in, vq->num_free);
+out + in, vq->vq.num_free);
/* FIXME: for historical reasons, we force a notify here if
* there are outgoing parts to the buffer. Presumably the
* host should service the ring ASAP. */
@@ -257,7 +245,7 @@ int virtqueue_add_buf(struct virtqueue *_vq,
}

/* We're about to use some buffers from the free list. */
-vq->num_free -= out + in;
+vq->vq.num_free -= out + in;

head = vq->free_head;
for (i = vq->free_head; out; i = vq->vring.desc[i].next, out--) {
@@ -303,7 +291,7 @@ int virtqueue_add_buf(struct virtqueue *_vq,
pr_debug("Added buffer head %i to %p\n", head, vq);
END_USE(vq);

-return vq->num_free;
+return vq->vq.num_free;
}
EXPORT_SYMBOL_GPL(virtqueue_add_buf);

@@ -400,13 +388,13 @@ static void detach_buf(struct vring_virtqueue *vq, unsigned int head)

while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) {
i = vq->vring.desc[i].next;
-vq->num_free++;
+vq->vq.num_free++;
}

vq->vring.desc[i].next = vq->free_head;
vq->free_head = head;
/* Plus final descriptor */
-vq->num_free++;
+vq->vq.num_free++;
}

static inline bool more_used(const struct vring_virtqueue *vq)
@@ -606,7 +594,7 @@ void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
return buf;
}
/* That should have freed everything. */
-BUG_ON(vq->num_free != vq->vring.num);
+BUG_ON(vq->vq.num_free != vq->vring.num);

END_USE(vq);
return NULL;
@@ -660,12 +648,13 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
vq->vq.callback = callback;
vq->vq.vdev = vdev;
vq->vq.name = name;
+vq->vq.num_free = num;
+vq->vq.index = index;
vq->notify = notify;
vq->weak_barriers = weak_barriers;
vq->broken = false;
vq->last_used_idx = 0;
vq->num_added = 0;
-vq->queue_index = index;
list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
vq->in_use = false;
@@ -680,7 +669,6 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;

/* Put everything in free lists. */
-vq->num_free = num;
vq->free_head = 0;
for (i = 0; i < num-1; i++) {
vq->vring.desc[i].next = i+1;
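
The virtio_ring.c changes above are mechanical: the free-descriptor count that used to live in struct vring_virtqueue is now maintained in the embedded struct virtqueue, and virtqueue_add_buf() keeps returning that count. A minimal, hedged sketch of a caller using the return value to throttle submissions (the stop-queue hook and threshold are hypothetical; the contract assumed here is the one visible above: remaining capacity on success, a negative errno when the buffer does not fit):

#include <linux/scatterlist.h>
#include <linux/virtio.h>

/* Arbitrary illustrative margin, not a kernel constant. */
#define EXAMPLE_STOP_THRESHOLD 4

static void example_stop_queue(void);	/* hypothetical driver hook */

/* Sketch: queue one outgoing buffer and throttle when the ring is
 * nearly full. */
static int example_submit(struct virtqueue *vq, struct scatterlist *sg,
			  unsigned int out, void *data)
{
	int capacity = virtqueue_add_buf(vq, sg, out, 0, data, GFP_ATOMIC);

	if (capacity < 0)
		return capacity;	/* ring full or other error */

	/* capacity equals vq->num_free after the add. */
	if (capacity < EXAMPLE_STOP_THRESHOLD)
		example_stop_queue();

	virtqueue_kick(vq);
	return 0;
}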
14 changes: 13 additions & 1 deletion include/linux/virtio.h
@@ -16,12 +16,20 @@
* @name: the name of this virtqueue (mainly for debugging)
* @vdev: the virtio device this queue was created for.
* @priv: a pointer for the virtqueue implementation to use.
+ * @index: the zero-based ordinal number for this queue.
+ * @num_free: number of elements we expect to be able to fit.
+ *
+ * A note on @num_free: with indirect buffers, each buffer needs one
+ * element in the queue, otherwise a buffer will need one element per
+ * sg element.
*/
struct virtqueue {
struct list_head list;
void (*callback)(struct virtqueue *vq);
const char *name;
struct virtio_device *vdev;
+unsigned int index;
+unsigned int num_free;
void *priv;
};

@@ -50,7 +58,11 @@ void *virtqueue_detach_unused_buf(struct virtqueue *vq);

unsigned int virtqueue_get_vring_size(struct virtqueue *vq);

-int virtqueue_get_queue_index(struct virtqueue *vq);
+/* FIXME: Obsolete accessor, but required for virtio_net merge. */
+static inline unsigned int virtqueue_get_queue_index(struct virtqueue *vq)
+{
+return vq->index;
+}

/**
* virtio_device - representation of a device using virtio
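
The @num_free note added to the header is the sizing rule callers care about: with indirect descriptor tables a whole buffer costs a single ring entry, otherwise it costs one entry per scatterlist element. A small illustrative helper spelling out that arithmetic (the core already performs this check inside virtqueue_add_buf(), so a real driver would not need it):

#include <stdbool.h>

/* Illustrative only: would a buffer made of out + in sg elements fit,
 * given the current num_free?  Mirrors the rule documented above. */
static bool example_buffer_fits(unsigned int num_free, bool indirect,
				unsigned int out, unsigned int in)
{
	/* One descriptor for the whole buffer if an indirect table is used
	 * (and there is more than one element), else one per sg element. */
	unsigned int needed = (indirect && out + in > 1) ? 1 : out + in;

	return num_free >= needed;
}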
