net: mv643xx_eth: Fix highmem support in non-TSO egress path

Commit 69ad0dd
Author: Ezequiel Garcia <[email protected]>
Date:   Mon May 19 13:59:59 2014 -0300

    net: mv643xx_eth: Use dma_map_single() to map the skb fragments

caused a nasty regression by removing support for highmem skb
fragments. By using page_address() to get a fragment's address, the
code assumes the fragment sits in a lowmem page. That assumption is
wrong: fragments can live in highmem pages, for which page_address()
returns no usable kernel virtual address, so the device is handed a
bogus buffer address to DMA from.
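
To illustrate the difference, here is a minimal sketch of the two
approaches (schematic only, not the driver's code; the helper names
and the 3.x-era skb_frag_t layout with page_offset are assumptions):

    #include <linux/dma-mapping.h>
    #include <linux/skbuff.h>

    /* Broken: page_address() yields a kernel virtual address only for
     * lowmem pages; for an unmapped highmem page it returns NULL, and
     * the pointer arithmetic below produces a bogus address.
     */
    static dma_addr_t map_frag_lowmem_only(struct device *dev,
                                           const skb_frag_t *frag,
                                           size_t len)
    {
        void *addr = page_address(skb_frag_page(frag)) +
                     frag->page_offset;

        return dma_map_single(dev, addr, len, DMA_TO_DEVICE);
    }

    /* Correct: skb_frag_dma_map() wraps dma_map_page(), which works on
     * the struct page itself and never needs a kernel virtual mapping.
     */
    static dma_addr_t map_frag_any(struct device *dev,
                                   const skb_frag_t *frag, size_t len)
    {
        return skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
    }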

This commit fixes this by using the skb_frag_dma_map() helper, which
maps an skb fragment correctly whether its page is in lowmem or
highmem. Additionally, the type of each mapping is now tracked per
descriptor, so the buffer can be unmapped with dma_unmap_page() or
dma_unmap_single() as appropriate.
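
An unmap call must pair with the map call that created the mapping;
with CONFIG_DMA_API_DEBUG enabled the kernel warns when a mapping is
released with the wrong variant. A sketch of the dispatch, assuming
hypothetical dev/busaddr/len variables standing in for driver state:

    /* Recorded at map time, one tag per descriptor. */
    char how = txq->tx_desc_mapping[tx_index];

    if (how == DESC_DMA_MAP_PAGE)
        /* pairs with skb_frag_dma_map()/dma_map_page() */
        dma_unmap_page(dev, busaddr, len, DMA_TO_DEVICE);
    else
        /* pairs with dma_map_single() */
        dma_unmap_single(dev, busaddr, len, DMA_TO_DEVICE);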

This commit also fixes the error path in txq_init() to release the
resources properly.
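
The reworked error path follows the usual kernel unwind idiom: each
failure site jumps to a goto label that releases exactly what was
allocated before it, in reverse order. Schematically (hypothetical
state and sizes, not the driver's code):

    #include <linux/slab.h>

    struct example_state {
        void *first;
        void *second;
    };

    static int example_init(struct example_state *st)
    {
        int ret;

        st->first = kzalloc(64, GFP_KERNEL);
        if (!st->first)
            return -ENOMEM;         /* nothing allocated yet */

        st->second = kzalloc(128, GFP_KERNEL);
        if (!st->second) {
            ret = -ENOMEM;
            goto err_free_first;    /* undo only what succeeded */
        }

        return 0;

    err_free_first:
        kfree(st->first);
        return ret;
    }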

Fixes: 69ad0dd ("net: mv643xx_eth: Use dma_map_single() to map the skb fragments")
Reported-by: Russell King <[email protected]>
Signed-off-by: Ezequiel Garcia <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
ezequielgarcia authored and davem330 committed Jan 27, 2015
1 parent 9d08da9 commit 9e91141
 drivers/net/ethernet/marvell/mv643xx_eth.c | 59 +++++++++++++++++++++-------
 1 file changed, 49 insertions(+), 10 deletions(-)

--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -192,6 +192,10 @@ static char mv643xx_eth_driver_version[] = "1.4";
 #define IS_TSO_HEADER(txq, addr) \
	((addr >= txq->tso_hdrs_dma) && \
	 (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE))
+
+#define DESC_DMA_MAP_SINGLE 0
+#define DESC_DMA_MAP_PAGE 1
+
 /*
  * RX/TX descriptors.
  */
@@ -362,6 +366,7 @@ struct tx_queue {
	dma_addr_t tso_hdrs_dma;

	struct tx_desc *tx_desc_area;
+	char *tx_desc_mapping; /* array to track the type of the dma mapping */
	dma_addr_t tx_desc_dma;
	int tx_desc_area_size;

@@ -750,6 +755,7 @@ txq_put_data_tso(struct net_device *dev, struct tx_queue *txq,
	if (txq->tx_curr_desc == txq->tx_ring_size)
		txq->tx_curr_desc = 0;
	desc = &txq->tx_desc_area[tx_index];
+	txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE;

	desc->l4i_chk = 0;
	desc->byte_cnt = length;
@@ -879,14 +885,13 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
		skb_frag_t *this_frag;
		int tx_index;
		struct tx_desc *desc;
-		void *addr;

		this_frag = &skb_shinfo(skb)->frags[frag];
-		addr = page_address(this_frag->page.p) + this_frag->page_offset;
		tx_index = txq->tx_curr_desc++;
		if (txq->tx_curr_desc == txq->tx_ring_size)
			txq->tx_curr_desc = 0;
		desc = &txq->tx_desc_area[tx_index];
+		txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_PAGE;

		/*
		 * The last fragment will generate an interrupt
@@ -902,8 +907,9 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)

		desc->l4i_chk = 0;
		desc->byte_cnt = skb_frag_size(this_frag);
-		desc->buf_ptr = dma_map_single(mp->dev->dev.parent, addr,
-					       desc->byte_cnt, DMA_TO_DEVICE);
+		desc->buf_ptr = skb_frag_dma_map(mp->dev->dev.parent,
+						 this_frag, 0, desc->byte_cnt,
+						 DMA_TO_DEVICE);
	}
 }

@@ -936,6 +942,7 @@ static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb,
	if (txq->tx_curr_desc == txq->tx_ring_size)
		txq->tx_curr_desc = 0;
	desc = &txq->tx_desc_area[tx_index];
+	txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE;

	if (nr_frags) {
		txq_submit_frag_skb(txq, skb);
@@ -1047,9 +1054,12 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
		int tx_index;
		struct tx_desc *desc;
		u32 cmd_sts;
+		char desc_dma_map;

		tx_index = txq->tx_used_desc;
		desc = &txq->tx_desc_area[tx_index];
+		desc_dma_map = txq->tx_desc_mapping[tx_index];
+
		cmd_sts = desc->cmd_sts;

		if (cmd_sts & BUFFER_OWNED_BY_DMA) {
@@ -1065,9 +1075,19 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
		reclaimed++;
		txq->tx_desc_count--;

-		if (!IS_TSO_HEADER(txq, desc->buf_ptr))
-			dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr,
-					 desc->byte_cnt, DMA_TO_DEVICE);
+		if (!IS_TSO_HEADER(txq, desc->buf_ptr)) {
+
+			if (desc_dma_map == DESC_DMA_MAP_PAGE)
+				dma_unmap_page(mp->dev->dev.parent,
+					       desc->buf_ptr,
+					       desc->byte_cnt,
+					       DMA_TO_DEVICE);
+			else
+				dma_unmap_single(mp->dev->dev.parent,
+						 desc->buf_ptr,
+						 desc->byte_cnt,
+						 DMA_TO_DEVICE);
+		}

		if (cmd_sts & TX_ENABLE_INTERRUPT) {
			struct sk_buff *skb = __skb_dequeue(&txq->tx_skb);
@@ -1996,6 +2016,7 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
	struct tx_queue *txq = mp->txq + index;
	struct tx_desc *tx_desc;
	int size;
+	int ret;
	int i;

	txq->index = index;
@@ -2048,18 +2069,34 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
					nexti * sizeof(struct tx_desc);
	}

+	txq->tx_desc_mapping = kcalloc(txq->tx_ring_size, sizeof(char),
+				       GFP_KERNEL);
+	if (!txq->tx_desc_mapping) {
+		ret = -ENOMEM;
+		goto err_free_desc_area;
+	}
+
	/* Allocate DMA buffers for TSO MAC/IP/TCP headers */
	txq->tso_hdrs = dma_alloc_coherent(mp->dev->dev.parent,
					   txq->tx_ring_size * TSO_HEADER_SIZE,
					   &txq->tso_hdrs_dma, GFP_KERNEL);
	if (txq->tso_hdrs == NULL) {
-		dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
-				  txq->tx_desc_area, txq->tx_desc_dma);
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto err_free_desc_mapping;
	}
	skb_queue_head_init(&txq->tx_skb);

	return 0;
+
+err_free_desc_mapping:
+	kfree(txq->tx_desc_mapping);
+err_free_desc_area:
+	if (index == 0 && size <= mp->tx_desc_sram_size)
+		iounmap(txq->tx_desc_area);
+	else
+		dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
+				  txq->tx_desc_area, txq->tx_desc_dma);
+	return ret;
 }

 static void txq_deinit(struct tx_queue *txq)
@@ -2077,6 +2114,8 @@ static void txq_deinit(struct tx_queue *txq)
	else
		dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
				  txq->tx_desc_area, txq->tx_desc_dma);
+	kfree(txq->tx_desc_mapping);
+
	if (txq->tso_hdrs)
		dma_free_coherent(mp->dev->dev.parent,
				  txq->tx_ring_size * TSO_HEADER_SIZE,
