@@ -1386,32 +1386,6 @@ static u32 pl08x_get_cctl(struct pl08x_dma_chan *plchan,
         return pl08x_cctl(cctl);
 }

-static int dma_set_runtime_config(struct dma_chan *chan,
-                                  struct dma_slave_config *config)
-{
-        struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
-        struct pl08x_driver_data *pl08x = plchan->host;
-
-        if (!plchan->slave)
-                return -EINVAL;
-
-        /* Reject definitely invalid configurations */
-        if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
-            config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
-                return -EINVAL;
-
-        if (config->device_fc && pl08x->vd->pl080s) {
-                dev_err(&pl08x->adev->dev,
-                        "%s: PL080S does not support peripheral flow control\n",
-                        __func__);
-                return -EINVAL;
-        }
-
-        plchan->cfg = *config;
-
-        return 0;
-}
-
 /*
  * Slave transactions callback to the slave device to allow
  * synchronization of slave DMA signals with the DMAC enable
@@ -1693,20 +1667,71 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_cyclic(
         return vchan_tx_prep(&plchan->vc, &txd->vd, flags);
 }

-static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-                         unsigned long arg)
+static int pl08x_config(struct dma_chan *chan,
+                        struct dma_slave_config *config)
+{
+        struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+        struct pl08x_driver_data *pl08x = plchan->host;
+
+        if (!plchan->slave)
+                return -EINVAL;
+
+        /* Reject definitely invalid configurations */
+        if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
+            config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
+                return -EINVAL;
+
+        if (config->device_fc && pl08x->vd->pl080s) {
+                dev_err(&pl08x->adev->dev,
+                        "%s: PL080S does not support peripheral flow control\n",
+                        __func__);
+                return -EINVAL;
+        }
+
+        plchan->cfg = *config;
+
+        return 0;
+}
+
+static int pl08x_terminate_all(struct dma_chan *chan)
 {
         struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
         struct pl08x_driver_data *pl08x = plchan->host;
         unsigned long flags;
-        int ret = 0;

-        /* Controls applicable to inactive channels */
-        if (cmd == DMA_SLAVE_CONFIG) {
-                return dma_set_runtime_config(chan,
-                                              (struct dma_slave_config *)arg);
+        spin_lock_irqsave(&plchan->vc.lock, flags);
+        if (!plchan->phychan && !plchan->at) {
+                spin_unlock_irqrestore(&plchan->vc.lock, flags);
+                return 0;
         }

+        plchan->state = PL08X_CHAN_IDLE;
+
+        if (plchan->phychan) {
+                /*
+                 * Mark physical channel as free and free any slave
+                 * signal
+                 */
+                pl08x_phy_free(plchan);
+        }
+        /* Dequeue jobs and free LLIs */
+        if (plchan->at) {
+                pl08x_desc_free(&plchan->at->vd);
+                plchan->at = NULL;
+        }
+        /* Dequeue jobs not yet fired as well */
+        pl08x_free_txd_list(pl08x, plchan);
+
+        spin_unlock_irqrestore(&plchan->vc.lock, flags);
+
+        return 0;
+}
+
+static int pl08x_pause(struct dma_chan *chan)
+{
+        struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+        unsigned long flags;
+
         /*
          * Anything succeeds on channels with no physical allocation and
          * no queued transfers.
@@ -1717,42 +1742,35 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
                 return 0;
         }

-        switch (cmd) {
-        case DMA_TERMINATE_ALL:
-                plchan->state = PL08X_CHAN_IDLE;
+        pl08x_pause_phy_chan(plchan->phychan);
+        plchan->state = PL08X_CHAN_PAUSED;

-                if (plchan->phychan) {
-                        /*
-                         * Mark physical channel as free and free any slave
-                         * signal
-                         */
-                        pl08x_phy_free(plchan);
-                }
-                /* Dequeue jobs and free LLIs */
-                if (plchan->at) {
-                        pl08x_desc_free(&plchan->at->vd);
-                        plchan->at = NULL;
-                }
-                /* Dequeue jobs not yet fired as well */
-                pl08x_free_txd_list(pl08x, plchan);
-                break;
-        case DMA_PAUSE:
-                pl08x_pause_phy_chan(plchan->phychan);
-                plchan->state = PL08X_CHAN_PAUSED;
-                break;
-        case DMA_RESUME:
-                pl08x_resume_phy_chan(plchan->phychan);
-                plchan->state = PL08X_CHAN_RUNNING;
-                break;
-        default:
-                /* Unknown command */
-                ret = -ENXIO;
-                break;
+        spin_unlock_irqrestore(&plchan->vc.lock, flags);
+
+        return 0;
+}
+
+static int pl08x_resume(struct dma_chan *chan)
+{
+        struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+        unsigned long flags;
+
+        /*
+         * Anything succeeds on channels with no physical allocation and
+         * no queued transfers.
+         */
+        spin_lock_irqsave(&plchan->vc.lock, flags);
+        if (!plchan->phychan && !plchan->at) {
+                spin_unlock_irqrestore(&plchan->vc.lock, flags);
+                return 0;
         }

+        pl08x_resume_phy_chan(plchan->phychan);
+        plchan->state = PL08X_CHAN_RUNNING;
+
         spin_unlock_irqrestore(&plchan->vc.lock, flags);

-        return ret;
+        return 0;
 }

 bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
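
With the old pl08x_control() command switch gone, the dmaengine core reaches these operations through dedicated callbacks instead of DMA_TERMINATE_ALL/DMA_PAUSE/DMA_RESUME commands. For orientation, the generic wrappers in include/linux/dmaengine.h dispatch roughly as sketched below; this is an approximation from memory, not part of this patch, and the -ENOSYS fallback for a missing callback is an assumption.

/*
 * Approximate sketch of the generic dmaengine wrappers after the
 * device_control split -- orientation only, not part of this patch.
 * The -ENOSYS fallback for drivers lacking a callback is an assumption.
 */
static inline int dmaengine_pause(struct dma_chan *chan)
{
        if (chan->device->device_pause)
                return chan->device->device_pause(chan);

        return -ENOSYS;
}

static inline int dmaengine_terminate_all(struct dma_chan *chan)
{
        if (chan->device->device_terminate_all)
                return chan->device->device_terminate_all(chan);

        return -ENOSYS;
}
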
@@ -2048,7 +2066,10 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
         pl08x->memcpy.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
         pl08x->memcpy.device_tx_status = pl08x_dma_tx_status;
         pl08x->memcpy.device_issue_pending = pl08x_issue_pending;
-        pl08x->memcpy.device_control = pl08x_control;
+        pl08x->memcpy.device_config = pl08x_config;
+        pl08x->memcpy.device_pause = pl08x_pause;
+        pl08x->memcpy.device_resume = pl08x_resume;
+        pl08x->memcpy.device_terminate_all = pl08x_terminate_all;

         /* Initialize slave engine */
         dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask);
@@ -2061,7 +2082,10 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
         pl08x->slave.device_issue_pending = pl08x_issue_pending;
         pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg;
         pl08x->slave.device_prep_dma_cyclic = pl08x_prep_dma_cyclic;
-        pl08x->slave.device_control = pl08x_control;
+        pl08x->slave.device_config = pl08x_config;
+        pl08x->slave.device_pause = pl08x_pause;
+        pl08x->slave.device_resume = pl08x_resume;
+        pl08x->slave.device_terminate_all = pl08x_terminate_all;

         /* Get the platform data */
         pl08x->pd = dev_get_platdata(&adev->dev);
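
With device_config, device_pause, device_resume and device_terminate_all registered on both the memcpy and slave engines, a slave client reaches pl08x_config() and the other new callbacks through the standard dmaengine wrappers. The snippet below is a hypothetical consumer-side sketch, not part of this commit; the function name and FIFO address are illustrative assumptions, and the channel is assumed to have been obtained elsewhere through the dmaengine channel request API.

#include <linux/dmaengine.h>

/*
 * Hypothetical client: configure a mem-to-device channel, then exercise
 * pause/resume/terminate. "chan" and "fifo_addr" are placeholders.
 */
static int example_setup_and_control(struct dma_chan *chan, dma_addr_t fifo_addr)
{
        struct dma_slave_config cfg = {
                .direction      = DMA_MEM_TO_DEV,
                .dst_addr       = fifo_addr,
                .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                .dst_maxburst   = 4,
                .device_fc      = false,  /* pl08x_config() rejects this on PL080S */
        };
        int ret;

        /* Ends up in pl08x_config() via the device_config callback */
        ret = dmaengine_slave_config(chan, &cfg);
        if (ret)
                return ret;

        /* Dispatches to pl08x_pause()/pl08x_resume()/pl08x_terminate_all() */
        ret = dmaengine_pause(chan);
        if (ret)
                return ret;
        ret = dmaengine_resume(chan);
        if (ret)
                return ret;

        return dmaengine_terminate_all(chan);
}
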