Skip to content

Commit

Permalink
Merge tag 'vfio-v6.2-rc6' of https://github.com/awilliam/linux-vfio
Browse files Browse the repository at this point in the history
Pull VFIO fixes from Alex Williamson:

 - Honor reserved regions when testing for IOMMU fine-grained super page
   support, avoiding a regression on s390 for a firmware device where
   the existence of the mapping, even if unused, can trigger an error
   state. (Niklas Schnelle)

 - Fix a deadlock in releasing KVM references by using the alternate
   .release() rather than .destroy() callback for the kvm-vfio device.
   (Yi Liu)

* tag 'vfio-v6.2-rc6' of https://github.com/awilliam/linux-vfio:
  kvm/vfio: Fix potential deadlock on vfio group_lock
  vfio/type1: Respect IOMMU reserved regions in vfio_test_domain_fgsp()
  • Loading branch information
torvalds committed Jan 23, 2023
2 parents 9946f09 + 51cdc8b commit 7bf70db
Show file tree
Hide file tree
Showing 2 changed files with 23 additions and 14 deletions.
31 changes: 20 additions & 11 deletions drivers/vfio/vfio_iommu_type1.c
Original file line number Diff line number Diff line change
Expand Up @@ -1856,24 +1856,33 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
* significantly boosts non-hugetlbfs mappings and doesn't seem to hurt when
* hugetlbfs is in use.
*/
static void vfio_test_domain_fgsp(struct vfio_domain *domain)
static void vfio_test_domain_fgsp(struct vfio_domain *domain, struct list_head *regions)
{
struct page *pages;
int ret, order = get_order(PAGE_SIZE * 2);
struct vfio_iova *region;
struct page *pages;
dma_addr_t start;

pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
if (!pages)
return;

ret = iommu_map(domain->domain, 0, page_to_phys(pages), PAGE_SIZE * 2,
IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE);
if (!ret) {
size_t unmapped = iommu_unmap(domain->domain, 0, PAGE_SIZE);
list_for_each_entry(region, regions, list) {
start = ALIGN(region->start, PAGE_SIZE * 2);
if (start >= region->end || (region->end - start < PAGE_SIZE * 2))
continue;

if (unmapped == PAGE_SIZE)
iommu_unmap(domain->domain, PAGE_SIZE, PAGE_SIZE);
else
domain->fgsp = true;
ret = iommu_map(domain->domain, start, page_to_phys(pages), PAGE_SIZE * 2,
IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE);
if (!ret) {
size_t unmapped = iommu_unmap(domain->domain, start, PAGE_SIZE);

if (unmapped == PAGE_SIZE)
iommu_unmap(domain->domain, start + PAGE_SIZE, PAGE_SIZE);
else
domain->fgsp = true;
}
break;
}

__free_pages(pages, order);
Expand Down Expand Up @@ -2326,7 +2335,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
}
}

vfio_test_domain_fgsp(domain);
vfio_test_domain_fgsp(domain, &iova_copy);

/* replay mappings on new domains */
ret = vfio_iommu_replay(iommu, domain);
Expand Down
6 changes: 3 additions & 3 deletions virt/kvm/vfio.c
Original file line number Diff line number Diff line change
Expand Up @@ -336,7 +336,7 @@ static int kvm_vfio_has_attr(struct kvm_device *dev,
return -ENXIO;
}

static void kvm_vfio_destroy(struct kvm_device *dev)
static void kvm_vfio_release(struct kvm_device *dev)
{
struct kvm_vfio *kv = dev->private;
struct kvm_vfio_group *kvg, *tmp;
Expand All @@ -355,15 +355,15 @@ static void kvm_vfio_destroy(struct kvm_device *dev)
kvm_vfio_update_coherency(dev);

kfree(kv);
kfree(dev); /* alloc by kvm_ioctl_create_device, free by .destroy */
kfree(dev); /* alloc by kvm_ioctl_create_device, free by .release */
}

static int kvm_vfio_create(struct kvm_device *dev, u32 type);

static struct kvm_device_ops kvm_vfio_ops = {
	.name = "kvm-vfio",
	.create = kvm_vfio_create,
	/*
	 * Use .release (deferred, runs outside kvm->lock) instead of
	 * .destroy: tearing down here drops VFIO group references, and
	 * doing that under kvm->lock can deadlock against the vfio
	 * group_lock (lock ordering inversion).
	 */
	.release = kvm_vfio_release,
	.set_attr = kvm_vfio_set_attr,
	.has_attr = kvm_vfio_has_attr,
};
Expand Down

0 comments on commit 7bf70db

Please sign in to comment.