ntb: add DMA error handling for RX DMA
Add support on the rx DMA path to allow recovery when the DMA engine
responds with an error status and aborts all subsequent ops.

Signed-off-by: Dave Jiang <[email protected]>
Acked-by: Allen Hubbe <[email protected]>
Cc: Jon Mason <[email protected]>
Cc: [email protected]
Signed-off-by: Vinod Koul <[email protected]>
davejiang authored and Vinod Koul committed Aug 8, 2016
1 parent 9cabc26 commit 7220357
Showing 1 changed file with 67 additions and 16 deletions.
83 changes: 67 additions & 16 deletions drivers/ntb/ntb_transport.c
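
Editor's note: the key API change in this patch is on the completion side. Instead of setting txd->callback, the driver now sets txd->callback_result, so the dmaengine core can hand back a struct dmaengine_result describing whether the transfer succeeded, failed, or was aborted. As background, here is a minimal sketch of that pattern for a generic dmaengine client; the my_* names are hypothetical, and DMA mapping, locking, and teardown are omitted.

    #include <linux/dmaengine.h>
    #include <linux/string.h>

    struct my_request {
            void *dst;              /* CPU-visible destination, for fallback */
            const void *src;        /* CPU-visible source, for fallback */
            size_t len;
    };

    /*
     * Completion handler with result reporting. The dmaengine core passes
     * a dmaengine_result when the DMA driver supports it; a client may
     * also call this directly with res == NULL on its CPU-copy path, as
     * the patch below does.
     */
    static void my_copy_done(void *param, const struct dmaengine_result *res)
    {
            struct my_request *req = param;

            if (res && res->result != DMA_TRANS_NOERROR) {
                    /* READ_FAILED, WRITE_FAILED or ABORTED: redo on the CPU */
                    memcpy(req->dst, req->src, req->len);
            }
            /* ...mark req complete for the upper layer here... */
    }

    static int my_submit_copy(struct dma_chan *chan, struct my_request *req,
                              dma_addr_t dst, dma_addr_t src)
    {
            struct dma_async_tx_descriptor *txd;
            dma_cookie_t cookie;

            txd = chan->device->device_prep_dma_memcpy(chan, dst, src,
                                                       req->len,
                                                       DMA_PREP_INTERRUPT);
            if (!txd)
                    return -ENXIO;

            txd->callback_result = my_copy_done;    /* not txd->callback */
            txd->callback_param = req;

            cookie = dmaengine_submit(txd);
            if (dma_submit_error(cookie))
                    return -ENXIO;

            dma_async_issue_pending(chan);
            return 0;
    }
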
@@ -105,13 +105,13 @@ struct ntb_queue_entry {
 	int retries;
 	int errors;
 	unsigned int tx_index;
+	unsigned int rx_index;
 
 	struct ntb_transport_qp *qp;
 	union {
 		struct ntb_payload_header __iomem *tx_hdr;
 		struct ntb_payload_header *rx_hdr;
 	};
-	unsigned int index;
 };
 
 struct ntb_rx_info {
@@ -265,6 +265,9 @@ static struct ntb_client ntb_transport_client;
 static int ntb_async_tx_submit(struct ntb_transport_qp *qp,
 			       struct ntb_queue_entry *entry);
 static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset);
+static int ntb_async_rx_submit(struct ntb_queue_entry *entry, void *offset);
+static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset);
+
 
 static int ntb_transport_bus_match(struct device *dev,
 				   struct device_driver *drv)
@@ -1235,7 +1238,7 @@ static void ntb_complete_rxc(struct ntb_transport_qp *qp)
 			break;
 
 		entry->rx_hdr->flags = 0;
-		iowrite32(entry->index, &qp->rx_info->entry);
+		iowrite32(entry->rx_index, &qp->rx_info->entry);
 
 		cb_data = entry->cb_data;
 		len = entry->len;
@@ -1253,10 +1256,36 @@ static void ntb_complete_rxc(struct ntb_transport_qp *qp)
 	spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);
 }
 
-static void ntb_rx_copy_callback(void *data)
+static void ntb_rx_copy_callback(void *data,
+				 const struct dmaengine_result *res)
 {
 	struct ntb_queue_entry *entry = data;
 
+	/* we need to check DMA results if we are using DMA */
+	if (res) {
+		enum dmaengine_tx_result dma_err = res->result;
+
+		switch (dma_err) {
+		case DMA_TRANS_READ_FAILED:
+		case DMA_TRANS_WRITE_FAILED:
+			entry->errors++;
+		case DMA_TRANS_ABORTED:
+		{
+			struct ntb_transport_qp *qp = entry->qp;
+			void *offset = qp->rx_buff + qp->rx_max_frame *
+					qp->rx_index;
+
+			ntb_memcpy_rx(entry, offset);
+			qp->rx_memcpy++;
+			return;
+		}
+
+		case DMA_TRANS_NOERROR:
+		default:
+			break;
+		}
+	}
+
 	entry->flags |= DESC_DONE_FLAG;
 
 	ntb_complete_rxc(entry->qp);
@@ -1272,10 +1301,10 @@ static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
 	/* Ensure that the data is fully copied out before clearing the flag */
 	wmb();
 
-	ntb_rx_copy_callback(entry);
+	ntb_rx_copy_callback(entry, NULL);
 }
 
-static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
+static int ntb_async_rx_submit(struct ntb_queue_entry *entry, void *offset)
 {
 	struct dma_async_tx_descriptor *txd;
 	struct ntb_transport_qp *qp = entry->qp;
@@ -1288,13 +1317,6 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
 	int retries = 0;
 
 	len = entry->len;
-
-	if (!chan)
-		goto err;
-
-	if (len < copy_bytes)
-		goto err;
-
 	device = chan->device;
 	pay_off = (size_t)offset & ~PAGE_MASK;
 	buff_off = (size_t)buf & ~PAGE_MASK;
@@ -1322,7 +1344,8 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
 	unmap->from_cnt = 1;
 
 	for (retries = 0; retries < DMA_RETRIES; retries++) {
-		txd = device->device_prep_dma_memcpy(chan, unmap->addr[1],
+		txd = device->device_prep_dma_memcpy(chan,
+						     unmap->addr[1],
 						     unmap->addr[0], len,
 						     DMA_PREP_INTERRUPT);
 		if (txd)
@@ -1337,7 +1360,7 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
 		goto err_get_unmap;
 	}
 
-	txd->callback = ntb_rx_copy_callback;
+	txd->callback_result = ntb_rx_copy_callback;
 	txd->callback_param = entry;
 	dma_set_unmap(txd, unmap);
 
@@ -1351,12 +1374,37 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
 
 	qp->rx_async++;
 
-	return;
+	return 0;
 
 err_set_unmap:
 	dmaengine_unmap_put(unmap);
 err_get_unmap:
 	dmaengine_unmap_put(unmap);
+err:
+	return -ENXIO;
+}
+
+static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
+{
+	struct ntb_transport_qp *qp = entry->qp;
+	struct dma_chan *chan = qp->rx_dma_chan;
+	int res;
+
+	if (!chan)
+		goto err;
+
+	if (entry->len < copy_bytes)
+		goto err;
+
+	res = ntb_async_rx_submit(entry, offset);
+	if (res < 0)
+		goto err;
+
+	if (!entry->retries)
+		qp->rx_async++;
+
+	return;
+
 err:
 	ntb_memcpy_rx(entry, offset);
 	qp->rx_memcpy++;
@@ -1403,7 +1451,7 @@ static int ntb_process_rxc(struct ntb_transport_qp *qp)
 	}
 
 	entry->rx_hdr = hdr;
-	entry->index = qp->rx_index;
+	entry->rx_index = qp->rx_index;
 
 	if (hdr->len > entry->len) {
 		dev_dbg(&qp->ndev->pdev->dev,
@@ -1981,6 +2029,9 @@ int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
 	entry->buf = data;
 	entry->len = len;
 	entry->flags = 0;
+	entry->retries = 0;
+	entry->errors = 0;
+	entry->rx_index = 0;
 
 	ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_pend_q);
 
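
Editor's note: taken together, the patch splits the old ntb_async_rx() into ntb_async_rx_submit(), which returns an error instead of falling back itself, and a thin wrapper that owns the CPU-memcpy fallback. Note also the deliberate fall-through in ntb_rx_copy_callback(): DMA_TRANS_READ_FAILED and DMA_TRANS_WRITE_FAILED bump entry->errors and then share the DMA_TRANS_ABORTED path, so every failed or aborted descriptor is redone with a CPU copy rather than dropped. A condensed sketch of the resulting flow, not the verbatim driver code:

    /* Condensed sketch of the post-patch RX path (details simplified) */
    static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
    {
            struct ntb_transport_qp *qp = entry->qp;

            if (!qp->rx_dma_chan ||                         /* no DMA channel bound */
                entry->len < copy_bytes ||                  /* too small to DMA */
                ntb_async_rx_submit(entry, offset) < 0)     /* prep/submit failed */
                    goto cpu_copy;

            if (!entry->retries)
                    qp->rx_async++;
            return;

    cpu_copy:
            /* ntb_memcpy_rx() fires the callback with res == NULL */
            ntb_memcpy_rx(entry, offset);
            qp->rx_memcpy++;
    }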