xsk: Use dma_need_sync instead of reimplementing it
Use the dma_need_sync helper instead of (not always entirely correctly)
poking into the dma-mapping internals.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Link: https://lore.kernel.org/bpf/[email protected]
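
For context: dma_need_sync(dev, dma_addr) returns true when dma_sync_single_for_{cpu,device}() calls are actually required for a given mapping. Below is a minimal sketch (not part of this commit) of the pattern the patch adopts, assuming a kernel that provides dma_need_sync(); the helper name map_and_probe_sync() is hypothetical, for illustration only.

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/mm.h>

/* Hypothetical helper, not from this commit: map one page and ask the
 * DMA layer whether the mapping needs explicit sync calls, instead of
 * inspecting dma_map_ops/swiotlb/coherence internals by hand.
 */
static int map_and_probe_sync(struct device *dev, struct page *page,
			      dma_addr_t *dma_out, bool *need_sync)
{
	dma_addr_t dma = dma_map_page_attrs(dev, page, 0, PAGE_SIZE,
					    DMA_BIDIRECTIONAL, 0);

	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	*dma_out = dma;
	/* True when dma_sync_single_for_{cpu,device}() must be called. */
	*need_sync = dma_need_sync(dev, dma);
	return 0;
}

Asking the DMA layer directly keeps the answer correct even as swiotlb and the direct-mapping code evolve, which is the point of the cleanup below.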
Christoph Hellwig authored and borkmann committed Jun 30, 2020
1 parent 53937ff commit 7e02457
Showing 1 changed file with 3 additions and 47 deletions.
50 changes: 3 additions & 47 deletions net/xdp/xsk_buff_pool.c
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -2,9 +2,6 @@
 
 #include <net/xsk_buff_pool.h>
 #include <net/xdp_sock.h>
-#include <linux/dma-direct.h>
-#include <linux/dma-noncoherent.h>
-#include <linux/swiotlb.h>
 
 #include "xsk_queue.h"
 
@@ -124,48 +121,6 @@ static void xp_check_dma_contiguity(struct xsk_buff_pool *pool)
 	}
 }
 
-static bool __maybe_unused xp_check_swiotlb_dma(struct xsk_buff_pool *pool)
-{
-#if defined(CONFIG_SWIOTLB)
-	phys_addr_t paddr;
-	u32 i;
-
-	for (i = 0; i < pool->dma_pages_cnt; i++) {
-		paddr = dma_to_phys(pool->dev, pool->dma_pages[i]);
-		if (is_swiotlb_buffer(paddr))
-			return false;
-	}
-#endif
-	return true;
-}
-
-static bool xp_check_cheap_dma(struct xsk_buff_pool *pool)
-{
-#if defined(CONFIG_HAS_DMA)
-	const struct dma_map_ops *ops = get_dma_ops(pool->dev);
-
-	if (ops) {
-		return !ops->sync_single_for_cpu &&
-			!ops->sync_single_for_device;
-	}
-
-	if (!dma_is_direct(ops))
-		return false;
-
-	if (!xp_check_swiotlb_dma(pool))
-		return false;
-
-	if (!dev_is_dma_coherent(pool->dev)) {
-#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
-    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
-    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE)
-		return false;
-#endif
-	}
-#endif
-	return true;
-}
-
 int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
 	       unsigned long attrs, struct page **pages, u32 nr_pages)
 {
@@ -179,6 +134,7 @@ int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
 
 	pool->dev = dev;
 	pool->dma_pages_cnt = nr_pages;
+	pool->dma_need_sync = false;
 
 	for (i = 0; i < pool->dma_pages_cnt; i++) {
 		dma = dma_map_page_attrs(dev, pages[i], 0, PAGE_SIZE,
@@ -187,13 +143,13 @@ int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
 			xp_dma_unmap(pool, attrs);
 			return -ENOMEM;
 		}
+		if (dma_need_sync(dev, dma))
+			pool->dma_need_sync = true;
 		pool->dma_pages[i] = dma;
 	}
 
 	if (pool->unaligned)
 		xp_check_dma_contiguity(pool);
-
-	pool->dma_need_sync = !xp_check_cheap_dma(pool);
 	return 0;
 }
 EXPORT_SYMBOL(xp_dma_map);

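The commit caches the result of dma_need_sync() once per pool at map time, so the data path only has to test a boolean. A hedged sketch of that consumer side, under the same assumptions; rx_sync_for_cpu() and its explicit flag argument are illustrative, not the in-tree XSK fast-path code.

#include <linux/dma-mapping.h>

/* Illustrative only: skip the (potentially expensive) sync entirely
 * when the mapping was reported as not needing one.
 */
static void rx_sync_for_cpu(struct device *dev, dma_addr_t dma,
			    size_t len, bool pool_dma_need_sync)
{
	if (!pool_dma_need_sync)
		return;

	dma_sync_single_for_cpu(dev, dma, len, DMA_BIDIRECTIONAL);
}

On cache-coherent hardware with no swiotlb bouncing, dma_need_sync() reports false for every page, pool->dma_need_sync stays false, and the per-packet sync calls disappear, which is exactly what xp_check_cheap_dma() tried, less reliably, to detect.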