Merge tag 'dma-mapping-5.18' of git://git.infradead.org/users/hch/dma-mapping

Pull dma-mapping updates from Christoph Hellwig:

 - do not zero buffer in set_memory_decrypted (Kirill A. Shutemov)

 - fix return value of dma-debug __setup handlers (Randy Dunlap)

 - swiotlb cleanups (Robin Murphy)

 - remove most remaining users of the pci-dma-compat.h API
   (Christophe JAILLET)

 - share the ABI header for the DMA map_benchmark with userspace
   (Tian Tao)

 - update the maintainer for DMA MAPPING BENCHMARK (Xiang Chen)

 - remove CONFIG_DMA_REMAP (me)

* tag 'dma-mapping-5.18' of git://git.infradead.org/users/hch/dma-mapping:
  dma-mapping: benchmark: extract a common header file for map_benchmark definition
  dma-debug: fix return value of __setup handlers
  dma-mapping: remove CONFIG_DMA_REMAP
  media: v4l2-pci-skeleton: Remove usage of the deprecated "pci-dma-compat.h" API
  rapidio/tsi721: Remove usage of the deprecated "pci-dma-compat.h" API
  sparc: Remove usage of the deprecated "pci-dma-compat.h" API
  agp/intel: Remove usage of the deprecated "pci-dma-compat.h" API
  alpha: Remove usage of the deprecated "pci-dma-compat.h" API
  MAINTAINERS: update maintainer list of DMA MAPPING BENCHMARK
  swiotlb: simplify array allocation
  swiotlb: tidy up includes
  swiotlb: simplify debugfs setup
  swiotlb: do not zero buffer in set_memory_decrypted()
torvalds committed Mar 29, 2022
2 parents 37fcacb + 8ddde07 commit 9ae2a14
Showing 18 changed files with 105 additions and 160 deletions.
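
Most of the hunks below mechanically convert the deprecated pci-dma-compat.h wrappers (the removed "-" lines) to the generic DMA API (the added "+" lines), which operates on the underlying struct device (&pdev->dev) and uses the DMA_* direction constants. A minimal sketch of the pattern, with made-up function and variable names, not code from this commit:

/*
 * Hedged sketch (not from this commit): how the pci-dma-compat.h wrappers
 * removed below map onto the generic DMA API.
 * "example_dma_to_device", "buf" and "len" are illustrative names only.
 */
#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int example_dma_to_device(struct pci_dev *pdev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* old: handle = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE); */
	handle = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, handle))
		return -ENOMEM;

	/* ... point the device at "handle" and start the transfer ... */

	/* old: pci_unmap_single(pdev, handle, len, PCI_DMA_TODEVICE); */
	dma_unmap_single(&pdev->dev, handle, len, DMA_TO_DEVICE);
	return 0;
}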
MAINTAINERS: 1 addition & 1 deletion

@@ -5880,7 +5880,7 @@ F: include/linux/dma-map-ops.h
 F: kernel/dma/
 
 DMA MAPPING BENCHMARK
-M: Barry Song <song.bao.hua@hisilicon.com>
+M: Xiang Chen <chenxiang66@hisilicon.com>
 L: [email protected]
 F: kernel/dma/map_benchmark.c
 F: tools/testing/selftests/dma/
arch/alpha/include/asm/floppy.h: 4 additions & 3 deletions

@@ -43,17 +43,18 @@ alpha_fd_dma_setup(char *addr, unsigned long size, int mode, int io)
 static int prev_dir;
 int dir;
 
-dir = (mode != DMA_MODE_READ) ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE;
+dir = (mode != DMA_MODE_READ) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
 
 if (bus_addr
 && (addr != prev_addr || size != prev_size || dir != prev_dir)) {
 /* different from last time -- unmap prev */
-pci_unmap_single(isa_bridge, bus_addr, prev_size, prev_dir);
+dma_unmap_single(&isa_bridge->dev, bus_addr, prev_size,
+		 prev_dir);
 bus_addr = 0;
 }
 
 if (!bus_addr) /* need to map it */
-bus_addr = pci_map_single(isa_bridge, addr, size, dir);
+bus_addr = dma_map_single(&isa_bridge->dev, addr, size, dir);
 
 /* remember this one as prev */
 prev_addr = addr;
arch/alpha/kernel/pci_iommu.c: 6 additions & 6 deletions

@@ -333,7 +333,7 @@ static dma_addr_t alpha_pci_map_page(struct device *dev, struct page *page,
 struct pci_dev *pdev = alpha_gendev_to_pci(dev);
 int dac_allowed;
 
-BUG_ON(dir == PCI_DMA_NONE);
+BUG_ON(dir == DMA_NONE);
 
 dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
 return pci_map_single_1(pdev, (char *)page_address(page) + offset,
@@ -356,7 +356,7 @@ static void alpha_pci_unmap_page(struct device *dev, dma_addr_t dma_addr,
 struct pci_iommu_arena *arena;
 long dma_ofs, npages;
 
-BUG_ON(dir == PCI_DMA_NONE);
+BUG_ON(dir == DMA_NONE);
 
 if (dma_addr >= __direct_map_base
 && dma_addr < __direct_map_base + __direct_map_size) {
@@ -460,7 +460,7 @@ static void alpha_pci_free_coherent(struct device *dev, size_t size,
 unsigned long attrs)
 {
 struct pci_dev *pdev = alpha_gendev_to_pci(dev);
-pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
+dma_unmap_single(&pdev->dev, dma_addr, size, DMA_BIDIRECTIONAL);
 free_pages((unsigned long)cpu_addr, get_order(size));
 
 DBGA2("pci_free_consistent: [%llx,%zx] from %ps\n",
@@ -639,7 +639,7 @@ static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg,
 dma_addr_t max_dma;
 int dac_allowed;
 
-BUG_ON(dir == PCI_DMA_NONE);
+BUG_ON(dir == DMA_NONE);
 
 dac_allowed = dev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
 
@@ -702,7 +702,7 @@ static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg,
 /* Some allocation failed while mapping the scatterlist
 entries. Unmap them now. */
 if (out > start)
-pci_unmap_sg(pdev, start, out - start, dir);
+dma_unmap_sg(&pdev->dev, start, out - start, dir);
 return -ENOMEM;
 }
 
@@ -722,7 +722,7 @@ static void alpha_pci_unmap_sg(struct device *dev, struct scatterlist *sg,
 dma_addr_t max_dma;
 dma_addr_t fbeg, fend;
 
-BUG_ON(dir == PCI_DMA_NONE);
+BUG_ON(dir == DMA_NONE);
 
 if (! alpha_mv.mv_pci_tbi)
 return;
arch/arm/Kconfig: 1 addition & 1 deletion

@@ -49,7 +49,7 @@ config ARM
 select DMA_DECLARE_COHERENT
 select DMA_GLOBAL_POOL if !MMU
 select DMA_OPS
-select DMA_REMAP if MMU
+select DMA_NONCOHERENT_MMAP if MMU
 select EDAC_SUPPORT
 select EDAC_ATOMIC_SCRUB
 select GENERIC_ALLOCATOR
arch/sparc/kernel/ioport.c: 1 addition & 1 deletion

@@ -309,7 +309,7 @@ arch_initcall(sparc_register_ioport);
 void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
 enum dma_data_direction dir)
 {
-if (dir != PCI_DMA_TODEVICE &&
+if (dir != DMA_TO_DEVICE &&
 sparc_cpu_model == sparc_leon &&
 !sparc_leon3_snooping_enabled())
 leon_flush_dcache_all();
arch/xtensa/Kconfig: 1 addition & 1 deletion

@@ -18,7 +18,7 @@ config XTENSA
 select BUILDTIME_TABLE_SORT
 select CLONE_BACKWARDS
 select COMMON_CLK
-select DMA_REMAP if MMU
+select DMA_NONCOHERENT_MMAP if MMU
 select GENERIC_ATOMIC64
 select GENERIC_IRQ_SHOW
 select GENERIC_LIB_CMPDI2
drivers/char/agp/intel-gtt.c: 13 additions & 13 deletions

@@ -111,8 +111,8 @@ static int intel_gtt_map_memory(struct page **pages,
 for_each_sg(st->sgl, sg, num_entries, i)
 sg_set_page(sg, pages[i], PAGE_SIZE, 0);
 
-if (!pci_map_sg(intel_private.pcidev,
-st->sgl, st->nents, PCI_DMA_BIDIRECTIONAL))
+if (!dma_map_sg(&intel_private.pcidev->dev, st->sgl, st->nents,
+DMA_BIDIRECTIONAL))
 goto err;
 
 return 0;
@@ -127,8 +127,8 @@ static void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg)
 struct sg_table st;
 DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count);
 
-pci_unmap_sg(intel_private.pcidev, sg_list,
-num_sg, PCI_DMA_BIDIRECTIONAL);
+dma_unmap_sg(&intel_private.pcidev->dev, sg_list, num_sg,
+DMA_BIDIRECTIONAL);
 
 st.sgl = sg_list;
 st.orig_nents = st.nents = num_sg;
@@ -303,9 +303,9 @@ static int intel_gtt_setup_scratch_page(void)
 set_pages_uc(page, 1);
 
 if (intel_private.needs_dmar) {
-dma_addr = pci_map_page(intel_private.pcidev, page, 0,
-PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-if (pci_dma_mapping_error(intel_private.pcidev, dma_addr)) {
+dma_addr = dma_map_page(&intel_private.pcidev->dev, page, 0,
+PAGE_SIZE, DMA_BIDIRECTIONAL);
+if (dma_mapping_error(&intel_private.pcidev->dev, dma_addr)) {
 __free_page(page);
 return -EINVAL;
 }
@@ -552,9 +552,9 @@ static void intel_gtt_teardown_scratch_page(void)
 {
 set_pages_wb(intel_private.scratch_page, 1);
 if (intel_private.needs_dmar)
-pci_unmap_page(intel_private.pcidev,
-intel_private.scratch_page_dma,
-PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+dma_unmap_page(&intel_private.pcidev->dev,
+intel_private.scratch_page_dma, PAGE_SIZE,
+DMA_BIDIRECTIONAL);
 __free_page(intel_private.scratch_page);
 }
 
@@ -1412,13 +1412,13 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
 
 if (bridge) {
 mask = intel_private.driver->dma_mask_size;
-if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
+if (dma_set_mask(&intel_private.pcidev->dev, DMA_BIT_MASK(mask)))
 dev_err(&intel_private.pcidev->dev,
 "set gfx device dma mask %d-bit failed!\n",
 mask);
 else
-pci_set_consistent_dma_mask(intel_private.pcidev,
-DMA_BIT_MASK(mask));
+dma_set_coherent_mask(&intel_private.pcidev->dev,
+DMA_BIT_MASK(mask));
 }
 
 if (intel_gtt_init() != 0) {
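
The intel-gtt hunks above keep the scatterlist-based dma_map_sg()/dma_unmap_sg() calls, where a return value of 0 from dma_map_sg() signals failure. As a hedged aside (not part of this commit), the sg_table-aware helpers wrap the same operation and return a conventional errno; a minimal sketch with made-up names:

/*
 * Hedged sketch: mapping a populated struct sg_table with the sg_table-aware
 * DMA helpers instead of open-coding dma_map_sg() on its fields.
 * "example_map_table" is an illustrative name, not code from this commit.
 */
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int example_map_table(struct device *dev, struct sg_table *st)
{
	int ret;

	/* Returns 0 on success or a negative errno on failure. */
	ret = dma_map_sgtable(dev, st, DMA_BIDIRECTIONAL, 0);
	if (ret)
		return ret;

	/* ... program the hardware with the mapped entries ... */

	dma_unmap_sgtable(dev, st, DMA_BIDIRECTIONAL, 0);
	return 0;
}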
drivers/iommu/dma-iommu.c: 5 additions & 9 deletions

@@ -856,7 +856,6 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
 return NULL;
 }
 
-#ifdef CONFIG_DMA_REMAP
 static struct sg_table *iommu_dma_alloc_noncontiguous(struct device *dev,
 size_t size, enum dma_data_direction dir, gfp_t gfp,
 unsigned long attrs)
@@ -886,7 +885,6 @@ static void iommu_dma_free_noncontiguous(struct device *dev, size_t size,
 sg_free_table(&sh->sgt);
 kfree(sh);
 }
-#endif /* CONFIG_DMA_REMAP */
 
 static void iommu_dma_sync_single_for_cpu(struct device *dev,
 dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
@@ -1280,7 +1278,7 @@ static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
 dma_free_from_pool(dev, cpu_addr, alloc_size))
 return;
 
-if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
+if (is_vmalloc_addr(cpu_addr)) {
 /*
 * If it the address is remapped, then it's either non-coherent
 * or highmem CMA, or an iommu_dma_alloc_remap() construction.
@@ -1322,7 +1320,7 @@ static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
 if (!page)
 return NULL;
 
-if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) {
+if (!coherent || PageHighMem(page)) {
 pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
 
 cpu_addr = dma_common_contiguous_remap(page, alloc_size,
@@ -1354,7 +1352,7 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
 
 gfp |= __GFP_ZERO;
 
-if (IS_ENABLED(CONFIG_DMA_REMAP) && gfpflags_allow_blocking(gfp) &&
+if (gfpflags_allow_blocking(gfp) &&
 !(attrs & DMA_ATTR_FORCE_CONTIGUOUS)) {
 return iommu_dma_alloc_remap(dev, size, handle, gfp,
 dma_pgprot(dev, PAGE_KERNEL, attrs), attrs);
@@ -1395,7 +1393,7 @@ static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 if (off >= nr_pages || vma_pages(vma) > nr_pages - off)
 return -ENXIO;
 
-if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
+if (is_vmalloc_addr(cpu_addr)) {
 struct page **pages = dma_common_find_pages(cpu_addr);
 
 if (pages)
@@ -1417,7 +1415,7 @@ static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
 struct page *page;
 int ret;
 
-if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
+if (is_vmalloc_addr(cpu_addr)) {
 struct page **pages = dma_common_find_pages(cpu_addr);
 
 if (pages) {
@@ -1449,10 +1447,8 @@ static const struct dma_map_ops iommu_dma_ops = {
 .free = iommu_dma_free,
 .alloc_pages = dma_common_alloc_pages,
 .free_pages = dma_common_free_pages,
-#ifdef CONFIG_DMA_REMAP
 .alloc_noncontiguous = iommu_dma_alloc_noncontiguous,
 .free_noncontiguous = iommu_dma_free_noncontiguous,
-#endif
 .mmap = iommu_dma_mmap,
 .get_sgtable = iommu_dma_get_sgtable,
 .map_page = iommu_dma_map_page,
drivers/rapidio/devices/tsi721.c: 4 additions & 4 deletions

@@ -2836,17 +2836,17 @@ static int tsi721_probe(struct pci_dev *pdev,
 }
 
 /* Configure DMA attributes. */
-if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
-err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 if (err) {
 tsi_err(&pdev->dev, "Unable to set DMA mask");
 goto err_unmap_bars;
 }
 
-if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
+if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)))
 tsi_info(&pdev->dev, "Unable to set consistent DMA mask");
 } else {
-err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
 if (err)
 tsi_info(&pdev->dev, "Unable to set consistent DMA mask");
 }
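
The tsi721 conversion above keeps the original two-step logic of setting the streaming and coherent masks separately. As a hedged aside (not something this commit does), many drivers collapse the two calls with dma_set_mask_and_coherent(); a minimal sketch:

/*
 * Hedged sketch: setting both DMA masks in one call, falling back from
 * 64-bit to 32-bit addressing. Illustrative only; not code from this commit.
 */
#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int example_set_dma_masks(struct pci_dev *pdev)
{
	int err;

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err)
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	return err;
}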
include/linux/map_benchmark.h: 31 additions (new file)

@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2022 HiSilicon Limited.
+ */
+
+#ifndef _KERNEL_DMA_BENCHMARK_H
+#define _KERNEL_DMA_BENCHMARK_H
+
+#define DMA_MAP_BENCHMARK _IOWR('d', 1, struct map_benchmark)
+#define DMA_MAP_MAX_THREADS 1024
+#define DMA_MAP_MAX_SECONDS 300
+#define DMA_MAP_MAX_TRANS_DELAY (10 * NSEC_PER_MSEC)
+
+#define DMA_MAP_BIDIRECTIONAL 0
+#define DMA_MAP_TO_DEVICE 1
+#define DMA_MAP_FROM_DEVICE 2
+
+struct map_benchmark {
+__u64 avg_map_100ns; /* average map latency in 100ns */
+__u64 map_stddev; /* standard deviation of map latency */
+__u64 avg_unmap_100ns; /* as above */
+__u64 unmap_stddev;
+__u32 threads; /* how many threads will do map/unmap in parallel */
+__u32 seconds; /* how long the test will last */
+__s32 node; /* which numa node this benchmark will run on */
+__u32 dma_bits; /* DMA addressing capability */
+__u32 dma_dir; /* DMA data direction */
+__u32 dma_trans_ns; /* time for DMA transmission in ns */
+__u32 granule; /* how many PAGE_SIZE will do map/unmap once a time */
+};
+#endif /* _KERNEL_DMA_BENCHMARK_H */
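
This new header is what the commit message calls sharing the map_benchmark ABI with userspace: the ioctl command and struct layout can now be included by the selftest under tools/testing/selftests/dma/ instead of being duplicated there. A hedged userspace sketch of how the interface is driven, assuming the shared header is on the include path, the dma_map_benchmark module is loaded with a test device bound to it, and the misc device appears as /dev/dma_map_benchmark:

/* Hedged userspace sketch; device path and setup are assumptions, see above. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/types.h>
#include <linux/map_benchmark.h>

int main(void)
{
	struct map_benchmark map = {
		.threads = 1,
		.seconds = 10,
		.node = -1,		/* no NUMA binding */
		.dma_bits = 32,
		.dma_dir = DMA_MAP_BIDIRECTIONAL,
		.granule = 1,		/* map/unmap one page at a time */
	};
	int fd = open("/dev/dma_map_benchmark", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, DMA_MAP_BENCHMARK, &map)) {
		perror("ioctl");
		close(fd);
		return 1;
	}
	printf("map: avg %llu stddev %llu (units of 100ns)\n",
	       (unsigned long long)map.avg_map_100ns,
	       (unsigned long long)map.map_stddev);
	close(fd);
	return 0;
}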
kernel/dma/Kconfig: 1 addition & 6 deletions

@@ -110,15 +110,10 @@ config DMA_GLOBAL_POOL
 select DMA_DECLARE_COHERENT
 bool
 
-config DMA_REMAP
-bool
-depends on MMU
-select DMA_NONCOHERENT_MMAP
-
 config DMA_DIRECT_REMAP
 bool
-select DMA_REMAP
 select DMA_COHERENT_POOL
+select DMA_NONCOHERENT_MMAP
 
 config DMA_CMA
 bool "DMA Contiguous Memory Allocator"
kernel/dma/Makefile: 1 addition & 1 deletion

@@ -8,5 +8,5 @@ obj-$(CONFIG_DMA_DECLARE_COHERENT) += coherent.o
 obj-$(CONFIG_DMA_API_DEBUG) += debug.o
 obj-$(CONFIG_SWIOTLB) += swiotlb.o
 obj-$(CONFIG_DMA_COHERENT_POOL) += pool.o
-obj-$(CONFIG_DMA_REMAP) += remap.o
+obj-$(CONFIG_MMU) += remap.o
 obj-$(CONFIG_DMA_MAP_BENCHMARK) += map_benchmark.o
kernel/dma/debug.c: 2 additions & 2 deletions

@@ -927,7 +927,7 @@ static __init int dma_debug_cmdline(char *str)
 global_disable = true;
 }
 
-return 0;
+return 1;
 }
 
 static __init int dma_debug_entries_cmdline(char *str)
@@ -936,7 +936,7 @@ static __init int dma_debug_entries_cmdline(char *str)
 return -EINVAL;
 if (!get_option(&str, &nr_prealloc_entries))
 nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES;
-return 0;
+return 1;
 }
 
 __setup("dma_debug=", dma_debug_cmdline);
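
The dma-debug change above is the "fix return value of __setup handlers" item from the pull message: a __setup() handler returns 1 to mark a boot option as handled, while returning 0 makes the kernel pass the unrecognized string on to init as a command-line or environment argument. A hedged sketch of the convention, using made-up names ("example=" is not a real kernel option):

/*
 * Hedged sketch of the __setup() return convention; "example=" and
 * example_setup() are illustrative names, not an existing kernel option.
 */
#include <linux/init.h>
#include <linux/string.h>
#include <linux/types.h>

static bool example_flag __initdata;

static int __init example_setup(char *str)
{
	if (str && !strcmp(str, "on"))
		example_flag = true;
	return 1;	/* handled: do not pass "example=..." on to init */
}
__setup("example=", example_setup);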
kernel/dma/direct.c: 7 additions & 11 deletions

@@ -265,17 +265,13 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
 if (!page)
 return NULL;
+
+/*
+ * dma_alloc_contiguous can return highmem pages depending on a
+ * combination the cma= arguments and per-arch setup. These need to be
+ * remapped to return a kernel virtual address.
+ */
 if (PageHighMem(page)) {
-/*
- * Depending on the cma= arguments and per-arch setup,
- * dma_alloc_contiguous could return highmem pages.
- * Without remapping there is no way to return them here, so
- * log an error and fail.
- */
-if (!IS_ENABLED(CONFIG_DMA_REMAP)) {
-dev_info(dev, "Rejecting highmem page from CMA.\n");
-goto out_free_pages;
-}
 remap = true;
 set_uncached = false;
 }
@@ -349,7 +345,7 @@ void dma_direct_free(struct device *dev, size_t size,
 dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
 return;
 
-if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
+if (is_vmalloc_addr(cpu_addr)) {
 vunmap(cpu_addr);
 } else {
 if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_CLEAR_UNCACHED))