igb: update code to better handle incrementing page count
Update the driver code so that we do bulk updates of the page reference
count instead of just incrementing it by one reference at a time.  The
advantage to doing this is that we cut down on atomic operations and
this in turn should give us a slight improvement in cycles per packet.
In addition, if we eventually move this over to using build_skb, the gains
will be more noticeable.

Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Alexander Duyck <[email protected]>
Acked-by: Jeff Kirsher <[email protected]>
Cc: "David S. Miller" <[email protected]>
Cc: "James E.J. Bottomley" <[email protected]>
Cc: Chris Metcalf <[email protected]>
Cc: David Howells <[email protected]>
Cc: Geert Uytterhoeven <[email protected]>
Cc: Hans-Christian Noren Egtvedt <[email protected]>
Cc: Helge Deller <[email protected]>
Cc: James Hogan <[email protected]>
Cc: Jonas Bonn <[email protected]>
Cc: Keguang Zhang <[email protected]>
Cc: Ley Foon Tan <[email protected]>
Cc: Mark Salter <[email protected]>
Cc: Max Filippov <[email protected]>
Cc: Michael Ellerman <[email protected]>
Cc: Michal Simek <[email protected]>
Cc: Ralf Baechle <[email protected]>
Cc: Rich Felker <[email protected]>
Cc: Richard Kuo <[email protected]>
Cc: Russell King <[email protected]>
Cc: Steven Miao <[email protected]>
Cc: Tobias Klauser <[email protected]>
Cc: Vineet Gupta <[email protected]>
Cc: Yoshinori Sato <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Alexander Duyck authored and torvalds committed Dec 15, 2016
1 parent 5be5955 commit bd4171a
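
The bulk-update scheme the commit message describes can be sketched in isolation. Below is a minimal, hypothetical userspace model of the pattern, not driver code: fake_page, try_reuse, and BIAS_REFILL are illustrative names, and C11 atomics stand in for the kernel's page _refcount.

#include <stdatomic.h>
#include <stdio.h>

#define BIAS_REFILL 65535u              /* plays the role of USHRT_MAX */

struct fake_page {
	atomic_uint refcount;           /* stand-in for struct page's _refcount */
};

struct rx_buffer {
	struct fake_page *page;
	unsigned int pagecnt_bias;      /* references the driver still owns */
};

/* Hot path: hand one fragment to a consumer. Only plain arithmetic on
 * pagecnt_bias; the atomic refcount is updated once per BIAS_REFILL frames.
 */
static int try_reuse(struct rx_buffer *buf)
{
	unsigned int bias = buf->pagecnt_bias--;

	/* Sole owner iff every live reference is part of our bias. */
	if (atomic_load(&buf->page->refcount) != bias)
		return 0;

	/* Bias down to its last reference: restock in one bulk update. */
	if (bias == 1) {
		atomic_fetch_add(&buf->page->refcount, BIAS_REFILL);
		buf->pagecnt_bias = BIAS_REFILL;
	}
	return 1;
}

int main(void)
{
	struct fake_page page;
	struct rx_buffer buf = { .page = &page, .pagecnt_bias = 1 };
	unsigned int reused = 0;

	atomic_init(&page.refcount, 1);

	for (int i = 0; i < 100000; i++) {
		if (try_reuse(&buf))
			reused++;
		/* The consumer's eventual put_page(): drop the handed-over ref. */
		atomic_fetch_sub(&page.refcount, 1);
	}

	printf("reused %u of 100000 frames\n", reused);
	return 0;
}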
Showing 2 changed files with 23 additions and 8 deletions.
7 changes: 6 additions & 1 deletion drivers/net/ethernet/intel/igb/igb.h
@@ -210,7 +210,12 @@ struct igb_tx_buffer {
 struct igb_rx_buffer {
 	dma_addr_t dma;
 	struct page *page;
-	unsigned int page_offset;
+#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
+	__u32 page_offset;
+#else
+	__u16 page_offset;
+#endif
+	__u16 pagecnt_bias;
 };
 
 struct igb_tx_queue_stats {
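
A note on the igb.h change above: page_offset only ever holds an offset within one page, so it fits in 16 bits whenever PAGE_SIZE < 65536, and the two __u16 fields pack together without padding. A small standalone illustration follows; the struct and PAGE_SIZE value are hypothetical, not the kernel's definitions.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u                 /* assumption: typical 4 KiB pages */

/* Hypothetical mirror of the patched layout on a small-page build. */
struct rx_buffer_narrow {
	uint64_t dma;                   /* stand-in for dma_addr_t */
	void *page;
	uint16_t page_offset;           /* the __u16 branch of the #if */
	uint16_t pagecnt_bias;          /* packs into the same word */
};

int main(void)
{
	/* The largest in-page offset is PAGE_SIZE - 1, so 16 bits suffice. */
	static_assert(PAGE_SIZE - 1 <= UINT16_MAX, "offset must fit in 16 bits");

	printf("sizeof(struct rx_buffer_narrow) = %zu\n",
	       sizeof(struct rx_buffer_narrow));
	return 0;
}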
24 changes: 17 additions & 7 deletions drivers/net/ethernet/intel/igb/igb_main.c
@@ -3962,7 +3962,8 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
 						PAGE_SIZE,
 						DMA_FROM_DEVICE,
 						DMA_ATTR_SKIP_CPU_SYNC);
-		__free_page(buffer_info->page);
+		__page_frag_drain(buffer_info->page, 0,
+				  buffer_info->pagecnt_bias);
 
 		buffer_info->page = NULL;
 	}
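
__page_frag_drain() is added earlier in this same series on the mm side; it drops many page references in one operation and frees the page when the count hits zero. A minimal userspace model of those semantics, with hypothetical names and C11 atomics in place of the kernel's refcount helpers:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_page {
	atomic_uint refcount;
};

/* Drop 'count' references in one atomic op; true means the caller held
 * the last references and the page could go back to the allocator.
 */
static bool frag_drain(struct fake_page *page, unsigned int count)
{
	/* atomic_fetch_sub returns the value before subtraction. */
	return atomic_fetch_sub(&page->refcount, count) == count;
}

int main(void)
{
	struct fake_page page;

	atomic_init(&page.refcount, 65536);

	/* Teardown with a full bias outstanding: one subtraction, not
	 * 65536 individual put_page() calls.
	 */
	printf("freed: %s\n", frag_drain(&page, 65536) ? "yes" : "no");
	return 0;
}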
@@ -6834,13 +6835,15 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
 				  struct page *page,
 				  unsigned int truesize)
 {
+	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias--;
+
 	/* avoid re-using remote pages */
 	if (unlikely(igb_page_is_reserved(page)))
 		return false;
 
 #if (PAGE_SIZE < 8192)
 	/* if we are only owner of page we can reuse it */
-	if (unlikely(page_count(page) != 1))
+	if (unlikely(page_ref_count(page) != pagecnt_bias))
 		return false;
 
 	/* flip page offset to other buffer */
@@ -6853,10 +6856,14 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
 		return false;
 #endif
 
-	/* Even if we own the page, we are not allowed to use atomic_set()
-	 * This would break get_page_unless_zero() users.
+	/* If we have drained the page fragment pool we need to update
+	 * the pagecnt_bias and page count so that we fully restock the
+	 * number of references the driver holds.
 	 */
-	page_ref_inc(page);
+	if (unlikely(pagecnt_bias == 1)) {
+		page_ref_add(page, USHRT_MAX);
+		rx_buffer->pagecnt_bias = USHRT_MAX;
+	}
 
 	return true;
 }
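
To put rough numbers on the restock logic above (assuming the reuse checks keep succeeding): a freshly mapped page starts with pagecnt_bias = 1, so the very first reuse bulk-adds USHRT_MAX (65535) references. After that, 65534 consecutive frames cost only a plain decrement and compare, and only the 65535th triggers the next page_ref_add(). One atomic read-modify-write per roughly 64K frames, instead of one per frame, is the cycles-per-packet saving the commit message describes.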
@@ -6908,7 +6915,6 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
 		return true;
 
 	/* this page cannot be reused so discard it */
-	__free_page(page);
 	return false;
 }

@@ -6979,10 +6985,13 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
 		/* hand second half of page back to the ring */
 		igb_reuse_rx_page(rx_ring, rx_buffer);
 	} else {
-		/* we are not reusing the buffer so unmap it */
+		/* We are not reusing the buffer so unmap it and free
+		 * any references we are holding to it
+		 */
 		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
 				     PAGE_SIZE, DMA_FROM_DEVICE,
 				     DMA_ATTR_SKIP_CPU_SYNC);
+		__page_frag_drain(page, 0, rx_buffer->pagecnt_bias);
 	}
 
 	/* clear contents of rx_buffer */
@@ -7256,6 +7265,7 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
 	bi->dma = dma;
 	bi->page = page;
 	bi->page_offset = 0;
+	bi->pagecnt_bias = 1;
 
 	return true;
 }
