Merge branch 'topic/dmatest' into for-linus
Vinod Koul committed Sep 6, 2017
2 parents 346ea25 + 3eeb515 commit 41bd031
Showing 12 changed files with 141 additions and 830 deletions.
7 changes: 0 additions & 7 deletions Documentation/dmaengine/provider.txt
@@ -181,13 +181,6 @@ Currently, the types available are:
   - Used by the client drivers to register a callback that will be
     called on a regular basis through the DMA controller interrupt
 
-* DMA_SG
-  - The device supports memory to memory scatter-gather
-    transfers.
-  - Even though a plain memcpy can look like a particular case of a
-    scatter-gather transfer, with a single chunk to transfer, it's a
-    distinct transaction type in the mem2mem transfers case
-
 * DMA_PRIVATE
   - The devices only supports slave transfers, and as such isn't
     available for async transfers.
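With DMA_SG gone, a mem-to-mem client that used to hand the engine a source and a destination scatterlist has to fall back on DMA_MEMCPY, one descriptor per chunk. Below is a minimal sketch of that fallback, assuming both scatterlists are already mapped with dma_map_sg() and that entries pair up with equal lengths (unequal lengths need the splitting walk that the removed atc_prep_dma_sg() in drivers/dma/at_hdmac.c, later in this commit, used to do); the helper name sg_copy_with_memcpy() is invented for illustration.

    /* Hypothetical fallback, not part of this commit: one DMA_MEMCPY
     * descriptor per scatterlist entry. Assumes src/dst are already
     * dma_map_sg()-mapped and entries pair up with equal lengths.
     */
    static int sg_copy_with_memcpy(struct dma_chan *chan,
                                   struct scatterlist *dst_sg,
                                   struct scatterlist *src_sg,
                                   unsigned int nents)
    {
            struct dma_async_tx_descriptor *tx;
            dma_cookie_t cookie;
            unsigned int i;

            for (i = 0; i < nents; i++) {
                    /* one memcpy descriptor per paired chunk */
                    tx = dmaengine_prep_dma_memcpy(chan,
                                                   sg_dma_address(dst_sg),
                                                   sg_dma_address(src_sg),
                                                   sg_dma_len(src_sg),
                                                   DMA_CTRL_ACK);
                    if (!tx)
                            return -ENOMEM;

                    cookie = dmaengine_submit(tx);
                    if (dma_submit_error(cookie))
                            return -EIO;

                    dst_sg = sg_next(dst_sg);
                    src_sg = sg_next(src_sg);
            }

            /* kick the engine once everything is queued */
            dma_async_issue_pending(chan);
            return 0;
    }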
23 changes: 0 additions & 23 deletions drivers/crypto/ccp/ccp-dmaengine.c
@@ -502,27 +502,6 @@ static struct dma_async_tx_descriptor *ccp_prep_dma_memcpy(
 	return &desc->tx_desc;
 }
 
-static struct dma_async_tx_descriptor *ccp_prep_dma_sg(
-	struct dma_chan *dma_chan, struct scatterlist *dst_sg,
-	unsigned int dst_nents, struct scatterlist *src_sg,
-	unsigned int src_nents, unsigned long flags)
-{
-	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
-						 dma_chan);
-	struct ccp_dma_desc *desc;
-
-	dev_dbg(chan->ccp->dev,
-		"%s - src=%p, src_nents=%u dst=%p, dst_nents=%u, flags=%#lx\n",
-		__func__, src_sg, src_nents, dst_sg, dst_nents, flags);
-
-	desc = ccp_create_desc(dma_chan, dst_sg, dst_nents, src_sg, src_nents,
-			       flags);
-	if (!desc)
-		return NULL;
-
-	return &desc->tx_desc;
-}
-
 static struct dma_async_tx_descriptor *ccp_prep_dma_interrupt(
 	struct dma_chan *dma_chan, unsigned long flags)
 {
@@ -704,7 +683,6 @@ int ccp_dmaengine_register(struct ccp_device *ccp)
 	dma_dev->directions = DMA_MEM_TO_MEM;
 	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
 	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
-	dma_cap_set(DMA_SG, dma_dev->cap_mask);
 	dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);
 
 	/* The DMA channels for this device can be set to public or private,
@@ -740,7 +718,6 @@ int ccp_dmaengine_register(struct ccp_device *ccp)
 
 	dma_dev->device_free_chan_resources = ccp_free_chan_resources;
 	dma_dev->device_prep_dma_memcpy = ccp_prep_dma_memcpy;
-	dma_dev->device_prep_dma_sg = ccp_prep_dma_sg;
 	dma_dev->device_prep_dma_interrupt = ccp_prep_dma_interrupt;
 	dma_dev->device_issue_pending = ccp_issue_pending;
 	dma_dev->device_tx_status = ccp_tx_status;
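The two ccp hunks above have to land together with the dmaengine.c change below: a capability bit left set without its matching prep callback no longer trips a BUG_ON() but fails registration cleanly. An illustrative sketch of the failure mode a driver would now see (variable names borrowed from this file):

    /* Illustrative only: capability claimed, callback missing. */
    dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
    /* dma_dev->device_prep_dma_memcpy accidentally left NULL */
    ret = dma_async_device_register(dma_dev);
    /* after this commit: ret == -EIO and the log reads
     *   "Device claims capability DMA_MEMCPY, but op is not defined"
     * before it: BUG_ON() halted the kernel during probe */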
140 changes: 1 addition & 139 deletions drivers/dma/at_hdmac.c
@@ -1202,138 +1202,6 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	return NULL;
 }
 
-/**
- * atc_prep_dma_sg - prepare memory to memory scatter-gather operation
- * @chan: the channel to prepare operation on
- * @dst_sg: destination scatterlist
- * @dst_nents: number of destination scatterlist entries
- * @src_sg: source scatterlist
- * @src_nents: number of source scatterlist entries
- * @flags: tx descriptor status flags
- */
-static struct dma_async_tx_descriptor *
-atc_prep_dma_sg(struct dma_chan *chan,
-		struct scatterlist *dst_sg, unsigned int dst_nents,
-		struct scatterlist *src_sg, unsigned int src_nents,
-		unsigned long flags)
-{
-	struct at_dma_chan *atchan = to_at_dma_chan(chan);
-	struct at_desc *desc = NULL;
-	struct at_desc *first = NULL;
-	struct at_desc *prev = NULL;
-	unsigned int src_width;
-	unsigned int dst_width;
-	size_t xfer_count;
-	u32 ctrla;
-	u32 ctrlb;
-	size_t dst_len = 0, src_len = 0;
-	dma_addr_t dst = 0, src = 0;
-	size_t len = 0, total_len = 0;
-
-	if (unlikely(dst_nents == 0 || src_nents == 0))
-		return NULL;
-
-	if (unlikely(dst_sg == NULL || src_sg == NULL))
-		return NULL;
-
-	ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN
-		| ATC_SRC_ADDR_MODE_INCR
-		| ATC_DST_ADDR_MODE_INCR
-		| ATC_FC_MEM2MEM;
-
-	/*
-	 * loop until there is either no more source or no more destination
-	 * scatterlist entry
-	 */
-	while (true) {
-
-		/* prepare the next transfer */
-		if (dst_len == 0) {
-
-			/* no more destination scatterlist entries */
-			if (!dst_sg || !dst_nents)
-				break;
-
-			dst = sg_dma_address(dst_sg);
-			dst_len = sg_dma_len(dst_sg);
-
-			dst_sg = sg_next(dst_sg);
-			dst_nents--;
-		}
-
-		if (src_len == 0) {
-
-			/* no more source scatterlist entries */
-			if (!src_sg || !src_nents)
-				break;
-
-			src = sg_dma_address(src_sg);
-			src_len = sg_dma_len(src_sg);
-
-			src_sg = sg_next(src_sg);
-			src_nents--;
-		}
-
-		len = min_t(size_t, src_len, dst_len);
-		if (len == 0)
-			continue;
-
-		/* take care for the alignment */
-		src_width = dst_width = atc_get_xfer_width(src, dst, len);
-
-		ctrla = ATC_SRC_WIDTH(src_width) |
-			ATC_DST_WIDTH(dst_width);
-
-		/*
-		 * The number of transfers to set up refers to the source width
-		 * that depends on the alignment.
-		 */
-		xfer_count = len >> src_width;
-		if (xfer_count > ATC_BTSIZE_MAX) {
-			xfer_count = ATC_BTSIZE_MAX;
-			len = ATC_BTSIZE_MAX << src_width;
-		}
-
-		/* create the transfer */
-		desc = atc_desc_get(atchan);
-		if (!desc)
-			goto err_desc_get;
-
-		desc->lli.saddr = src;
-		desc->lli.daddr = dst;
-		desc->lli.ctrla = ctrla | xfer_count;
-		desc->lli.ctrlb = ctrlb;
-
-		desc->txd.cookie = 0;
-		desc->len = len;
-
-		atc_desc_chain(&first, &prev, desc);
-
-		/* update the lengths and addresses for the next loop cycle */
-		dst_len -= len;
-		src_len -= len;
-		dst += len;
-		src += len;
-
-		total_len += len;
-	}
-
-	/* First descriptor of the chain embeds additional information */
-	first->txd.cookie = -EBUSY;
-	first->total_len = total_len;
-
-	/* set end-of-link to the last link descriptor of list */
-	set_desc_eol(desc);
-
-	first->txd.flags = flags; /* client is in control of this ack */
-
-	return &first->txd;
-
-err_desc_get:
-	atc_desc_put(atchan, first);
-	return NULL;
-}
-
 /**
  * atc_dma_cyclic_check_values
  * Check for too big/unaligned periods and unaligned DMA buffer
@@ -1933,14 +1801,12 @@ static int __init at_dma_probe(struct platform_device *pdev)
 
 	/* setup platform data for each SoC */
 	dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask);
-	dma_cap_set(DMA_SG, at91sam9rl_config.cap_mask);
 	dma_cap_set(DMA_INTERLEAVE, at91sam9g45_config.cap_mask);
 	dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask);
 	dma_cap_set(DMA_MEMSET, at91sam9g45_config.cap_mask);
 	dma_cap_set(DMA_MEMSET_SG, at91sam9g45_config.cap_mask);
 	dma_cap_set(DMA_PRIVATE, at91sam9g45_config.cap_mask);
 	dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask);
-	dma_cap_set(DMA_SG, at91sam9g45_config.cap_mask);
 
 	/* get DMA parameters from controller type */
 	plat_dat = at_dma_get_driver_data(pdev);
@@ -2078,16 +1944,12 @@ static int __init at_dma_probe(struct platform_device *pdev)
 		atdma->dma_common.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
 	}
 
-	if (dma_has_cap(DMA_SG, atdma->dma_common.cap_mask))
-		atdma->dma_common.device_prep_dma_sg = atc_prep_dma_sg;
-
 	dma_writel(atdma, EN, AT_DMA_ENABLE);
 
-	dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s%s%s), %d channels\n",
+	dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s%s), %d channels\n",
 		 dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
 		 dma_has_cap(DMA_MEMSET, atdma->dma_common.cap_mask) ? "set " : "",
 		 dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "",
-		 dma_has_cap(DMA_SG, atdma->dma_common.cap_mask) ? "sg-cpy " : "",
 		 plat_dat->nr_channels);
 
 	dma_async_device_register(&atdma->dma_common);
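The heart of the removed atc_prep_dma_sg() is its two-pointer walk, which pairs source and destination entries of unequal length and carves each hardware descriptor out of min(src_len, dst_len). A CPU-side analogue of that walk may make the control flow easier to follow; this is a standalone sketch with invented types (struct chunk, sg_pair_copy()), not driver code:

    #include <stddef.h>
    #include <string.h>

    /* CPU-side analogue of the removed pairing walk: copy across two
     * chunk lists whose entry lengths need not match. Illustrative only.
     */
    struct chunk { void *buf; size_t len; };

    static void sg_pair_copy(struct chunk *dst, size_t dst_n,
                             struct chunk *src, size_t src_n)
    {
            size_t di = 0, si = 0;          /* current entry index */
            size_t doff = 0, soff = 0;      /* offset within current entry */

            while (di < dst_n && si < src_n) {
                    /* each step consumes min() of what is left on each
                     * side, like one lli descriptor in atc_prep_dma_sg() */
                    size_t len = dst[di].len - doff;

                    if (src[si].len - soff < len)
                            len = src[si].len - soff;

                    memcpy((char *)dst[di].buf + doff,
                           (char *)src[si].buf + soff, len);

                    doff += len;
                    soff += len;
                    if (doff == dst[di].len) {      /* dst entry drained */
                            di++;
                            doff = 0;
                    }
                    if (soff == src[si].len) {      /* src entry drained */
                            si++;
                            soff = 0;
                    }
            }
    }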
103 changes: 79 additions & 24 deletions drivers/dma/dmaengine.c
@@ -923,30 +923,85 @@ int dma_async_device_register(struct dma_device *device)
 		return -ENODEV;
 
 	/* validate device routines */
-	BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
-		!device->device_prep_dma_memcpy);
-	BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
-		!device->device_prep_dma_xor);
-	BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) &&
-		!device->device_prep_dma_xor_val);
-	BUG_ON(dma_has_cap(DMA_PQ, device->cap_mask) &&
-		!device->device_prep_dma_pq);
-	BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) &&
-		!device->device_prep_dma_pq_val);
-	BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
-		!device->device_prep_dma_memset);
-	BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
-		!device->device_prep_dma_interrupt);
-	BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
-		!device->device_prep_dma_sg);
-	BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
-		!device->device_prep_dma_cyclic);
-	BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) &&
-		!device->device_prep_interleaved_dma);
-
-	BUG_ON(!device->device_tx_status);
-	BUG_ON(!device->device_issue_pending);
-	BUG_ON(!device->dev);
+	if (!device->dev) {
+		pr_err("DMA device must have dev\n");
+		return -EIO;
+	}
+
+	if (dma_has_cap(DMA_MEMCPY, device->cap_mask) && !device->device_prep_dma_memcpy) {
+		dev_err(device->dev,
+			"Device claims capability %s, but op is not defined\n",
+			"DMA_MEMCPY");
+		return -EIO;
+	}
+
+	if (dma_has_cap(DMA_XOR, device->cap_mask) && !device->device_prep_dma_xor) {
+		dev_err(device->dev,
+			"Device claims capability %s, but op is not defined\n",
+			"DMA_XOR");
+		return -EIO;
+	}
+
+	if (dma_has_cap(DMA_XOR_VAL, device->cap_mask) && !device->device_prep_dma_xor_val) {
+		dev_err(device->dev,
+			"Device claims capability %s, but op is not defined\n",
+			"DMA_XOR_VAL");
+		return -EIO;
+	}
+
+	if (dma_has_cap(DMA_PQ, device->cap_mask) && !device->device_prep_dma_pq) {
+		dev_err(device->dev,
+			"Device claims capability %s, but op is not defined\n",
+			"DMA_PQ");
+		return -EIO;
+	}
+
+	if (dma_has_cap(DMA_PQ_VAL, device->cap_mask) && !device->device_prep_dma_pq_val) {
+		dev_err(device->dev,
+			"Device claims capability %s, but op is not defined\n",
+			"DMA_PQ_VAL");
+		return -EIO;
+	}
+
+	if (dma_has_cap(DMA_MEMSET, device->cap_mask) && !device->device_prep_dma_memset) {
+		dev_err(device->dev,
+			"Device claims capability %s, but op is not defined\n",
+			"DMA_MEMSET");
+		return -EIO;
+	}
+
+	if (dma_has_cap(DMA_INTERRUPT, device->cap_mask) && !device->device_prep_dma_interrupt) {
+		dev_err(device->dev,
+			"Device claims capability %s, but op is not defined\n",
+			"DMA_INTERRUPT");
+		return -EIO;
+	}
+
+	if (dma_has_cap(DMA_CYCLIC, device->cap_mask) && !device->device_prep_dma_cyclic) {
+		dev_err(device->dev,
+			"Device claims capability %s, but op is not defined\n",
+			"DMA_CYCLIC");
+		return -EIO;
+	}
+
+	if (dma_has_cap(DMA_INTERLEAVE, device->cap_mask) && !device->device_prep_interleaved_dma) {
+		dev_err(device->dev,
+			"Device claims capability %s, but op is not defined\n",
+			"DMA_INTERLEAVE");
+		return -EIO;
+	}
+
+	if (!device->device_tx_status) {
+		dev_err(device->dev, "Device tx_status is not defined\n");
+		return -EIO;
+	}
+
+	if (!device->device_issue_pending) {
+		dev_err(device->dev, "Device issue_pending is not defined\n");
+		return -EIO;
+	}
 
 	/* note: this only matters in the
 	 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
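The added if-blocks are deliberately uniform: test the capability bit with dma_has_cap(), test the matching prep hook for NULL, then dev_err() and return -EIO on mismatch. Purely as an illustration (this commit keeps the checks open-coded), the pattern could be folded into a local helper macro; CHECK_PREP_OP is hypothetical and not part of this commit:

    /* Hypothetical consolidation: one macro per capability/callback
     * pair, same dev_err() text and -EIO return as the open-coded
     * checks above. Note the early return hidden inside the macro.
     */
    #define CHECK_PREP_OP(dev, cap, op)                                    \
            do {                                                           \
                    if (dma_has_cap(cap, (dev)->cap_mask) && !(dev)->op) { \
                            dev_err((dev)->dev,                            \
                                    "Device claims capability %s, but op is not defined\n", \
                                    #cap);                                 \
                            return -EIO;                                   \
                    }                                                      \
            } while (0)

    /* usage inside dma_async_device_register(): */
    CHECK_PREP_OP(device, DMA_MEMCPY, device_prep_dma_memcpy);
    CHECK_PREP_OP(device, DMA_XOR, device_prep_dma_xor);
    /* ... and so on for the remaining capability bits ... */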