net: page_pool: refactor dma_map into own function page_pool_dma_map
In preparation for the next patch, move the DMA mapping into its own function,
as this will make the subsequent changes easier to follow.

[ilias.apalodimas: make page_pool_dma_map return boolean]
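
For context, a minimal sketch of how a driver opts into this path: with
PP_FLAG_DMA_MAP set in the pool params, the slow allocation path maps each
page via the new page_pool_dma_map() helper. This is illustrative only and
not part of the patch; my_create_pool and the sizing values are hypothetical.

	#include <net/page_pool.h>

	/* Hypothetical driver helper -- illustrative only. With PP_FLAG_DMA_MAP
	 * set, pages coming out of __page_pool_alloc_pages_slow() are mapped by
	 * page_pool_dma_map(); PP_FLAG_DMA_SYNC_DEV additionally triggers
	 * page_pool_dma_sync_for_device() on each freshly mapped page.
	 */
	static struct page_pool *my_create_pool(struct device *dev)
	{
		struct page_pool_params pp_params = {
			.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
			.order		= 0,			/* order-0 pages */
			.pool_size	= 256,			/* hypothetical ring size */
			.nid		= NUMA_NO_NODE,
			.dev		= dev,			/* device passed to dma_map_page_attrs() */
			.dma_dir	= DMA_FROM_DEVICE,	/* RX direction */
			.max_len	= PAGE_SIZE,		/* sync length for DMA_SYNC_DEV */
			.offset		= 0,
		};

		return page_pool_create(&pp_params);	/* ERR_PTR() on failure */
	}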

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Jesper Dangaard Brouer <[email protected]>
Signed-off-by: Mel Gorman <[email protected]>
Reviewed-by: Ilias Apalodimas <[email protected]>
Reviewed-by: Alexander Lobakin <[email protected]>
Cc: Alexander Duyck <[email protected]>
Cc: Christoph Hellwig <[email protected]>
Cc: Chuck Lever <[email protected]>
Cc: David Miller <[email protected]>
Cc: Matthew Wilcox (Oracle) <[email protected]>
Cc: Vlastimil Babka <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
netoptimizer authored and torvalds committed Apr 30, 2021
1 parent f6e70aa commit dfa5971
Showing 1 changed file with 26 additions and 19 deletions.
45 changes: 26 additions & 19 deletions net/core/page_pool.c
@@ -180,14 +180,37 @@ static void page_pool_dma_sync_for_device(struct page_pool *pool,
 					  pool->p.dma_dir);
 }
 
+static bool page_pool_dma_map(struct page_pool *pool, struct page *page)
+{
+	dma_addr_t dma;
+
+	/* Setup DMA mapping: use 'struct page' area for storing DMA-addr
+	 * since dma_addr_t can be either 32 or 64 bits and does not always fit
+	 * into page private data (i.e 32bit cpu with 64bit DMA caps)
+	 * This mapping is kept for lifetime of page, until leaving pool.
+	 */
+	dma = dma_map_page_attrs(pool->p.dev, page, 0,
+				 (PAGE_SIZE << pool->p.order),
+				 pool->p.dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
+	if (dma_mapping_error(pool->p.dev, dma))
+		return false;
+
+	page->dma_addr = dma;
+
+	if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
+		page_pool_dma_sync_for_device(pool, page, pool->p.max_len);
+
+	return true;
+}
+
 /* slow path */
 noinline
 static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
 						 gfp_t _gfp)
 {
+	unsigned int pp_flags = pool->p.flags;
 	struct page *page;
 	gfp_t gfp = _gfp;
-	dma_addr_t dma;
 
 	/* We could always set __GFP_COMP, and avoid this branch, as
 	 * prep_new_page() can handle order-0 with __GFP_COMP.
@@ -211,30 +234,14 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
 	if (!page)
 		return NULL;
 
-	if (!(pool->p.flags & PP_FLAG_DMA_MAP))
-		goto skip_dma_map;
-
-	/* Setup DMA mapping: use 'struct page' area for storing DMA-addr
-	 * since dma_addr_t can be either 32 or 64 bits and does not always fit
-	 * into page private data (i.e 32bit cpu with 64bit DMA caps)
-	 * This mapping is kept for lifetime of page, until leaving pool.
-	 */
-	dma = dma_map_page_attrs(pool->p.dev, page, 0,
-				 (PAGE_SIZE << pool->p.order),
-				 pool->p.dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
-	if (dma_mapping_error(pool->p.dev, dma)) {
+	if ((pp_flags & PP_FLAG_DMA_MAP) &&
+	    unlikely(!page_pool_dma_map(pool, page))) {
 		put_page(page);
 		return NULL;
 	}
-	page->dma_addr = dma;
-
-	if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
-		page_pool_dma_sync_for_device(pool, page, pool->p.max_len);
-
-skip_dma_map:
 	/* Track how many pages are held 'in-flight' */
 	pool->pages_state_hold_cnt++;
 
 	trace_page_pool_state_hold(pool, page, pool->pages_state_hold_cnt);
 
 	/* When page just alloc'ed is should/must have refcnt 1. */
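
Since page->dma_addr now carries the mapping for the page's whole pool
lifetime, a consumer never re-maps on refill and a mapping failure simply
surfaces as a NULL allocation. A hedged sketch of an RX refill fragment;
my_rx_ring, my_post_rx_buffer and my_rx_refill_one are hypothetical names,
while page_pool_dev_alloc_pages() and page_pool_get_dma_addr() are existing
page_pool API:

	/* Hypothetical RX refill fragment -- illustrative only. The pool is
	 * assumed to have been created with PP_FLAG_DMA_MAP, so the page
	 * returned here was mapped by page_pool_dma_map() on the slow path
	 * (or recycled already mapped).
	 */
	static int my_rx_refill_one(struct page_pool *pool, struct my_rx_ring *ring)
	{
		struct page *page;
		dma_addr_t dma;

		page = page_pool_dev_alloc_pages(pool);
		if (!page)
			return -ENOMEM;	/* covers the page_pool_dma_map() failure case */

		/* Retrieve the address stored by page_pool_dma_map() in
		 * page->dma_addr instead of mapping the page again. */
		dma = page_pool_get_dma_addr(page);

		my_post_rx_buffer(ring, dma);	/* hypothetical hardware hand-off */
		return 0;
	}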
