net: ethernet: ti: davinci_cpdma: add dma mapped submit
When an already DMA-mapped packet needs to be sent, as with the XDP
page pool, a "mapped" submit can be used. This patch adds a DMA-mapped
submit variant based on the regular one.
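A minimal caller-side sketch (not part of this patch) of how the new mapped submit might be used from an XDP transmit path. The helper name, the page-pool usage and the missing error handling are illustrative assumptions; only the cpdma_chan_submit_mapped() call reflects the API added here.

#include <net/page_pool.h>
#include "davinci_cpdma.h"

/* Illustrative only: the buffer is already DMA mapped by the page pool,
 * so the dma_addr_t is passed directly instead of a virtual address and
 * the descriptor is flagged internally with CPDMA_DMA_EXT_MAP.
 */
static int example_xdp_xmit(struct cpdma_chan *txch, struct page *page,
			    int len, int directed)
{
	dma_addr_t dma = page_pool_get_dma_addr(page);

	/* the token is handed back to the completion handler; the page
	 * pointer is a convenient choice here
	 */
	return cpdma_chan_submit_mapped(txch, page, dma, len, directed);
}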

Signed-off-by: Ivan Khoronzhuk <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
ikhorn authored and davem330 committed Jul 8, 2019
1 parent 1da4bbe commit 6670aca
Showing 2 changed files with 83 additions and 10 deletions.
89 changes: 79 additions & 10 deletions drivers/net/ethernet/ti/davinci_cpdma.c
@@ -139,6 +139,7 @@ struct submit_info {
int directed;
void *token;
void *data;
int flags;
int len;
};

@@ -184,6 +185,8 @@ static struct cpdma_control_info controls[] = {
(directed << CPDMA_TO_PORT_SHIFT)); \
} while (0)

#define CPDMA_DMA_EXT_MAP BIT(16)

static void cpdma_desc_pool_destroy(struct cpdma_ctlr *ctlr)
{
struct cpdma_desc_pool *pool = ctlr->pool;
@@ -1015,6 +1018,7 @@ static int cpdma_chan_submit_si(struct submit_info *si)
struct cpdma_chan *chan = si->chan;
struct cpdma_ctlr *ctlr = chan->ctlr;
int len = si->len;
int swlen = len;
struct cpdma_desc __iomem *desc;
dma_addr_t buffer;
u32 mode;
@@ -1036,16 +1040,22 @@ static int cpdma_chan_submit_si(struct submit_info *si)
chan->stats.runt_transmit_buff++;
}

buffer = dma_map_single(ctlr->dev, si->data, len, chan->dir);
ret = dma_mapping_error(ctlr->dev, buffer);
if (ret) {
cpdma_desc_free(ctlr->pool, desc, 1);
return -EINVAL;
}

mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
cpdma_desc_to_port(chan, mode, si->directed);

if (si->flags & CPDMA_DMA_EXT_MAP) {
buffer = (dma_addr_t)si->data;
dma_sync_single_for_device(ctlr->dev, buffer, len, chan->dir);
swlen |= CPDMA_DMA_EXT_MAP;
} else {
buffer = dma_map_single(ctlr->dev, si->data, len, chan->dir);
ret = dma_mapping_error(ctlr->dev, buffer);
if (ret) {
cpdma_desc_free(ctlr->pool, desc, 1);
return -EINVAL;
}
}

/* Relaxed IO accessors can be used here as there is read barrier
* at the end of write sequence.
*/
@@ -1055,7 +1065,7 @@ static int cpdma_chan_submit_si(struct submit_info *si)
writel_relaxed(mode | len, &desc->hw_mode);
writel_relaxed((uintptr_t)si->token, &desc->sw_token);
writel_relaxed(buffer, &desc->sw_buffer);
writel_relaxed(len, &desc->sw_len);
writel_relaxed(swlen, &desc->sw_len);
desc_read(desc, sw_len);

__cpdma_chan_submit(chan, desc);
@@ -1079,6 +1089,32 @@ int cpdma_chan_idle_submit(struct cpdma_chan *chan, void *token, void *data,
si.data = data;
si.len = len;
si.directed = directed;
si.flags = 0;

spin_lock_irqsave(&chan->lock, flags);
if (chan->state == CPDMA_STATE_TEARDOWN) {
spin_unlock_irqrestore(&chan->lock, flags);
return -EINVAL;
}

ret = cpdma_chan_submit_si(&si);
spin_unlock_irqrestore(&chan->lock, flags);
return ret;
}

int cpdma_chan_idle_submit_mapped(struct cpdma_chan *chan, void *token,
dma_addr_t data, int len, int directed)
{
struct submit_info si;
unsigned long flags;
int ret;

si.chan = chan;
si.token = token;
si.data = (void *)data;
si.len = len;
si.directed = directed;
si.flags = CPDMA_DMA_EXT_MAP;

spin_lock_irqsave(&chan->lock, flags);
if (chan->state == CPDMA_STATE_TEARDOWN) {
@@ -1103,6 +1139,32 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
si.data = data;
si.len = len;
si.directed = directed;
si.flags = 0;

spin_lock_irqsave(&chan->lock, flags);
if (chan->state != CPDMA_STATE_ACTIVE) {
spin_unlock_irqrestore(&chan->lock, flags);
return -EINVAL;
}

ret = cpdma_chan_submit_si(&si);
spin_unlock_irqrestore(&chan->lock, flags);
return ret;
}

int cpdma_chan_submit_mapped(struct cpdma_chan *chan, void *token,
dma_addr_t data, int len, int directed)
{
struct submit_info si;
unsigned long flags;
int ret;

si.chan = chan;
si.token = token;
si.data = (void *)data;
si.len = len;
si.directed = directed;
si.flags = CPDMA_DMA_EXT_MAP;

spin_lock_irqsave(&chan->lock, flags);
if (chan->state != CPDMA_STATE_ACTIVE) {
@@ -1140,10 +1202,17 @@ static void __cpdma_chan_free(struct cpdma_chan *chan,
uintptr_t token;

token = desc_read(desc, sw_token);
buff_dma = desc_read(desc, sw_buffer);
origlen = desc_read(desc, sw_len);

dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir);
buff_dma = desc_read(desc, sw_buffer);
if (origlen & CPDMA_DMA_EXT_MAP) {
origlen &= ~CPDMA_DMA_EXT_MAP;
dma_sync_single_for_cpu(ctlr->dev, buff_dma, origlen,
chan->dir);
} else {
dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir);
}

cpdma_desc_free(pool, desc, 1);
(*chan->handler)((void *)token, outlen, status);
}
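A standalone sketch of the sw_len encoding used in the hunks above: the frame lengths handled by this driver fit in the low 16 bits, so bit 16 (CPDMA_DMA_EXT_MAP) can mark descriptors whose buffers were mapped by the caller, letting the completion path choose a cache sync instead of an unmap. The user-space harness below is for illustration only.

#include <stdint.h>
#include <stdio.h>

/* Same bit the patch reserves in the descriptor's sw_len word. */
#define CPDMA_DMA_EXT_MAP (1u << 16)

int main(void)
{
	uint32_t len = 1500;                        /* frame length, below bit 16 */
	uint32_t sw_len = len | CPDMA_DMA_EXT_MAP;  /* submit side: caller-mapped buffer */

	/* completion side: test the flag, then recover the original length */
	if (sw_len & CPDMA_DMA_EXT_MAP)
		printf("caller mapped: sync for CPU, len=%u\n",
		       sw_len & ~CPDMA_DMA_EXT_MAP);
	else
		printf("driver mapped: unmap, len=%u\n", sw_len);

	return 0;
}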
4 changes: 4 additions & 0 deletions drivers/net/ethernet/ti/davinci_cpdma.h
@@ -77,8 +77,12 @@ int cpdma_chan_stop(struct cpdma_chan *chan);

int cpdma_chan_get_stats(struct cpdma_chan *chan,
struct cpdma_chan_stats *stats);
int cpdma_chan_submit_mapped(struct cpdma_chan *chan, void *token,
dma_addr_t data, int len, int directed);
int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
int len, int directed);
int cpdma_chan_idle_submit_mapped(struct cpdma_chan *chan, void *token,
dma_addr_t data, int len, int directed);
int cpdma_chan_idle_submit(struct cpdma_chan *chan, void *token, void *data,
int len, int directed);
int cpdma_chan_process(struct cpdma_chan *chan, int quota);
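The declarations above give each existing submit entry point a _mapped twin. A sketch of how a caller might pick between them, assuming a hypothetical RX-refill helper: as the diff shows, cpdma_chan_idle_submit*() only rejects a channel in teardown, so it can queue buffers before the channel is started, while cpdma_chan_submit*() requires an active channel.

#include <net/page_pool.h>
#include "davinci_cpdma.h"

/* Illustrative RX refill with a pre-mapped page-pool buffer; everything
 * except the cpdma_* calls is a hypothetical name.
 */
static int example_rx_refill(struct cpdma_chan *rxch, struct page *page,
			     int buf_len, bool running)
{
	dma_addr_t dma = page_pool_get_dma_addr(page);

	if (running)
		return cpdma_chan_submit_mapped(rxch, page, dma, buf_len, 0);

	/* channel not started yet: the idle variant still accepts buffers */
	return cpdma_chan_idle_submit_mapped(rxch, page, dma, buf_len, 0);
}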
