virtio: destroy region cache during reset
We don't destroy the region cache during reset, which can leak the
mappings of the previous driver to a buggy or malicious driver that
doesn't set a vring address before starting to use the device. Fix
this by destroying the region cache during reset and validating it
before use.

Cc: Cornelia Huck <[email protected]>
Cc: Paolo Bonzini <[email protected]>
Reviewed-by: Cornelia Huck <[email protected]>
Signed-off-by: Jason Wang <[email protected]>
Reviewed-by: Michael S. Tsirkin <[email protected]>
Signed-off-by: Michael S. Tsirkin <[email protected]>
jasowang authored and mstsirkin committed Mar 15, 2017
1 parent 168e4af commit e0e2d64
Showing 1 changed file with 30 additions and 15 deletions.
45 changes: 30 additions & 15 deletions hw/virtio/virtio.c
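
The fix combines two moves that the sketch below illustrates in isolation:
reset unpublishes the cached mappings, and every vring accessor validates
the published cache before dereferencing it. This is a minimal stand-alone
sketch, not QEMU code: the names (RegionCache, VirtQueueSketch, cache_get,
device_reset) are hypothetical, plain C11 atomics stand in for QEMU's
atomic_rcu_read()/atomic_rcu_set(), and the immediate free() stands in for
the call_rcu() deferral the patch uses.

/* Illustrative sketch only -- see the caveats above. */
#include <assert.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdlib.h>

typedef struct RegionCache {
    uint64_t desc, avail, used;      /* stands in for the mapped ring regions */
} RegionCache;

typedef struct VirtQueueSketch {
    _Atomic(RegionCache *) caches;   /* pointer published to the accessors */
} VirtQueueSketch;

/* Accessors validate the cache before use, mirroring the new
 * vring_get_region_caches() helper added by the patch. */
static RegionCache *cache_get(VirtQueueSketch *vq)
{
    RegionCache *c = atomic_load_explicit(&vq->caches, memory_order_acquire);
    assert(c != NULL);               /* driver must set a vring address first */
    return c;
}

/* Reset unpublishes the cache so a post-reset driver cannot reuse the
 * previous driver's mappings. */
static void device_reset(VirtQueueSketch *vq)
{
    RegionCache *old = atomic_exchange_explicit(&vq->caches, NULL,
                                                memory_order_acq_rel);
    free(old);                       /* simplified: QEMU defers this via call_rcu() */
}

int main(void)
{
    VirtQueueSketch vq;
    atomic_init(&vq.caches, calloc(1, sizeof(RegionCache)));
    (void)cache_get(&vq);            /* fine while a cache is published */
    device_reset(&vq);
    /* cache_get(&vq) would now assert instead of handing back stale mappings */
    return 0;
}

In the patch itself, a buggy or malicious driver that starts the device after
reset without setting a vring address hits the assertion in
vring_get_region_caches() rather than reading through the previous driver's
mappings.
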
@@ -185,18 +185,24 @@ static void vring_desc_read(VirtIODevice *vdev, VRingDesc *desc,
virtio_tswap16s(vdev, &desc->next);
}

+ static VRingMemoryRegionCaches *vring_get_region_caches(struct VirtQueue *vq)
+ {
+ VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
+ assert(caches != NULL);
+ return caches;
+ }
/* Called within rcu_read_lock(). */
static inline uint16_t vring_avail_flags(VirtQueue *vq)
{
- VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
+ VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
hwaddr pa = offsetof(VRingAvail, flags);
return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
}

/* Called within rcu_read_lock(). */
static inline uint16_t vring_avail_idx(VirtQueue *vq)
{
- VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
+ VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
hwaddr pa = offsetof(VRingAvail, idx);
vq->shadow_avail_idx = virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
return vq->shadow_avail_idx;
@@ -205,7 +211,7 @@ static inline uint16_t vring_avail_idx(VirtQueue *vq)
/* Called within rcu_read_lock(). */
static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
{
- VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
+ VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
hwaddr pa = offsetof(VRingAvail, ring[i]);
return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
}
@@ -220,7 +226,7 @@ static inline uint16_t vring_get_used_event(VirtQueue *vq)
static inline void vring_used_write(VirtQueue *vq, VRingUsedElem *uelem,
int i)
{
- VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
+ VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
hwaddr pa = offsetof(VRingUsed, ring[i]);
virtio_tswap32s(vq->vdev, &uelem->id);
virtio_tswap32s(vq->vdev, &uelem->len);
@@ -231,15 +237,15 @@ static inline void vring_used_write(VirtQueue *vq, VRingUsedElem *uelem,
/* Called within rcu_read_lock(). */
static uint16_t vring_used_idx(VirtQueue *vq)
{
- VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
+ VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
hwaddr pa = offsetof(VRingUsed, idx);
return virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
}

/* Called within rcu_read_lock(). */
static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
{
- VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
+ VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
hwaddr pa = offsetof(VRingUsed, idx);
virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
address_space_cache_invalidate(&caches->used, pa, sizeof(val));
@@ -249,7 +255,7 @@ static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
/* Called within rcu_read_lock(). */
static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
{
- VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
+ VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
VirtIODevice *vdev = vq->vdev;
hwaddr pa = offsetof(VRingUsed, flags);
uint16_t flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
@@ -261,7 +267,7 @@ static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
/* Called within rcu_read_lock(). */
static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
{
- VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
+ VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
VirtIODevice *vdev = vq->vdev;
hwaddr pa = offsetof(VRingUsed, flags);
uint16_t flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
@@ -279,7 +285,7 @@ static inline void vring_set_avail_event(VirtQueue *vq, uint16_t val)
return;
}

- caches = atomic_rcu_read(&vq->vring.caches);
+ caches = vring_get_region_caches(vq);
pa = offsetof(VRingUsed, ring[vq->vring.num]);
virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
address_space_cache_invalidate(&caches->used, pa, sizeof(val));
@@ -577,7 +583,7 @@ void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
total_bufs = in_total = out_total = 0;

max = vq->vring.num;
- caches = atomic_rcu_read(&vq->vring.caches);
+ caches = vring_get_region_caches(vq);
if (caches->desc.len < max * sizeof(VRingDesc)) {
virtio_error(vdev, "Cannot map descriptor ring");
goto err;
@@ -844,7 +850,7 @@ void *virtqueue_pop(VirtQueue *vq, size_t sz)

i = head;

- caches = atomic_rcu_read(&vq->vring.caches);
+ caches = vring_get_region_caches(vq);
if (caches->desc.len < max * sizeof(VRingDesc)) {
virtio_error(vdev, "Cannot map descriptor ring");
goto done;
@@ -1143,6 +1149,17 @@ static enum virtio_device_endian virtio_current_cpu_endian(void)
}
}

+ static void virtio_virtqueue_reset_region_cache(struct VirtQueue *vq)
+ {
+ VRingMemoryRegionCaches *caches;
+
+ caches = atomic_read(&vq->vring.caches);
+ atomic_rcu_set(&vq->vring.caches, NULL);
+ if (caches) {
+ call_rcu(caches, virtio_free_region_cache, rcu);
+ }
+ }

void virtio_reset(void *opaque)
{
VirtIODevice *vdev = opaque;
@@ -1183,6 +1200,7 @@ void virtio_reset(void *opaque)
vdev->vq[i].notification = true;
vdev->vq[i].vring.num = vdev->vq[i].vring.num_default;
vdev->vq[i].inuse = 0;
+ virtio_virtqueue_reset_region_cache(&vdev->vq[i]);
}
}

@@ -2477,13 +2495,10 @@ static void virtio_device_free_virtqueues(VirtIODevice *vdev)
}

for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
- VRingMemoryRegionCaches *caches;
if (vdev->vq[i].vring.num == 0) {
break;
}
- caches = atomic_read(&vdev->vq[i].vring.caches);
- atomic_set(&vdev->vq[i].vring.caches, NULL);
- virtio_free_region_cache(caches);
+ virtio_virtqueue_reset_region_cache(&vdev->vq[i]);
}
g_free(vdev->vq);
}
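
A note on the last hunk: the old teardown path in virtio_device_free_virtqueues()
cleared the pointer and freed the caches directly, while the patch routes both
reset and teardown through virtio_virtqueue_reset_region_cache(), which hands
the old caches to call_rcu(). The vring accessors run inside rcu_read_lock()
(see the "Called within rcu_read_lock()" comments), so a concurrent reader may
still hold the old pointer and the free has to wait for a grace period. A rough
equivalent using userspace RCU (liburcu) -- an assumption for illustration, not
the RCU API QEMU itself provides -- could look like this:

/* Hypothetical liburcu sketch; names are made up, and callers are assumed
 * to have registered with rcu_register_thread(). */
#include <urcu.h>      /* liburcu: rcu_read_lock(), rcu_dereference(), ... */
#include <stdlib.h>

struct cache { int data; };
static struct cache *published_cache;

static int read_cache(void)
{
    int v = 0;
    rcu_read_lock();                           /* reader side */
    struct cache *c = rcu_dereference(published_cache);
    if (c) {                                   /* validate before use */
        v = c->data;
    }
    rcu_read_unlock();
    return v;
}

static void reset_cache(void)
{
    struct cache *old = published_cache;
    rcu_assign_pointer(published_cache, NULL); /* unpublish first */
    synchronize_rcu();                         /* wait out current readers;
                                                  call_rcu() instead defers the
                                                  free without blocking */
    free(old);
}
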