Skip to content

Commit

Permalink
ARM/dma-mapping: merge IOMMU ops
Browse files Browse the repository at this point in the history
The dma_sync_* operations are now the only difference between the
coherent and non-coherent IOMMU ops. Some minor tweaks to make those
safe for coherent devices with minimal overhead, and we can condense
down to a single set of DMA ops.

Signed-off-by: Robin Murphy <[email protected]>
Signed-off-by: Christoph Hellwig <[email protected]>
Tested-by: Marc Zyngier <[email protected]>
  • Loading branch information
rmurphy-arm authored and Christoph Hellwig committed Jul 7, 2022
1 parent d563bcc commit 4136ce9
Showing 1 changed file with 13 additions and 24 deletions.
37 changes: 13 additions & 24 deletions arch/arm/mm/dma-mapping.c
Original file line number Diff line number Diff line change
Expand Up @@ -1341,6 +1341,9 @@ static void arm_iommu_sync_sg_for_cpu(struct device *dev,
struct scatterlist *s;
int i;

if (dev->dma_coherent)
return;

for_each_sg(sg, s, nents, i)
__dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);

Expand All @@ -1360,6 +1363,9 @@ static void arm_iommu_sync_sg_for_device(struct device *dev,
struct scatterlist *s;
int i;

if (dev->dma_coherent)
return;

for_each_sg(sg, s, nents, i)
__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
}
Expand Down Expand Up @@ -1493,12 +1499,13 @@ static void arm_iommu_sync_single_for_cpu(struct device *dev,
{
struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
dma_addr_t iova = handle & PAGE_MASK;
struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
struct page *page;
unsigned int offset = handle & ~PAGE_MASK;

if (!iova)
if (dev->dma_coherent || !iova)
return;

page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
__dma_page_dev_to_cpu(page, offset, size, dir);
}

Expand All @@ -1507,12 +1514,13 @@ static void arm_iommu_sync_single_for_device(struct device *dev,
{
struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
dma_addr_t iova = handle & PAGE_MASK;
struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
struct page *page;
unsigned int offset = handle & ~PAGE_MASK;

if (!iova)
if (dev->dma_coherent || !iova)
return;

page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
__dma_page_cpu_to_dev(page, offset, size, dir);
}

Expand All @@ -1536,22 +1544,6 @@ static const struct dma_map_ops iommu_ops = {
.unmap_resource = arm_iommu_unmap_resource,
};

/*
 * DMA ops table for cache-coherent devices behind an ARM IOMMU.
 *
 * Compared with the iommu_ops table above, the .sync_single_for_* and
 * .sync_sg_for_* callbacks are deliberately absent: a coherent device
 * needs no CPU cache maintenance around DMA transfers.
 *
 * NOTE(review): per the commit message, the sync callbacks were the only
 * remaining difference from iommu_ops, so this table is removed and the
 * sync paths instead bail out early when dev->dma_coherent is set.
 */
static const struct dma_map_ops iommu_coherent_ops = {
.alloc = arm_iommu_alloc_attrs,
.free = arm_iommu_free_attrs,
.mmap = arm_iommu_mmap_attrs,
.get_sgtable = arm_iommu_get_sgtable,

/* Page-level map/unmap (no sync variants needed for coherent devices). */
.map_page = arm_iommu_map_page,
.unmap_page = arm_iommu_unmap_page,

/* Scatter-gather map/unmap. */
.map_sg = arm_iommu_map_sg,
.unmap_sg = arm_iommu_unmap_sg,

/* MMIO resource mapping (never involves CPU caches). */
.map_resource = arm_iommu_map_resource,
.unmap_resource = arm_iommu_unmap_resource,
};

/**
* arm_iommu_create_mapping
* @bus: pointer to the bus holding the client device (for IOMMU calls)
Expand Down Expand Up @@ -1750,10 +1742,7 @@ static void arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
return;
}

if (coherent)
set_dma_ops(dev, &iommu_coherent_ops);
else
set_dma_ops(dev, &iommu_ops);
set_dma_ops(dev, &iommu_ops);
}

static void arm_teardown_iommu_dma_ops(struct device *dev)
Expand Down

0 comments on commit 4136ce9

Please sign in to comment.