
Commit 49b11bc

tsbogend authored and davem330 committed
SGISEEQ: use cached memory access to make driver work on IP28

- Use inline functions for dma_sync_* instead of macros
- Added a Kconfig change to make selection for similar SGI boxes easier

Signed-off-by: Thomas Bogendoerfer <[email protected]>
Acked-by: Ralf Baechle <[email protected]>
Signed-off-by: Jeff Garzik <[email protected]>
1 parent db17f39 commit 49b11bc
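The switch from the DMA_SYNC_DESC_* macros to inline functions is not cosmetic: the macros cast their argument to (void *), so any pointer slips through, while the inline functions give the compiler a real parameter to type-check. A minimal userspace sketch of the difference, with stub names (this is not the kernel API):

#include <stdio.h>

struct rx_desc { unsigned int cntinfo; };

/* Macro style: the (void *) cast silently accepts any pointer type. */
#define SYNC_DESC_MACRO(addr) \
	do { printf("sync %p, %zu bytes\n", (void *)(addr), \
		    sizeof(struct rx_desc)); } while (0)

/* Inline-function style: the parameter type is checked by the compiler. */
static inline void sync_desc_func(struct rx_desc *addr)
{
	printf("sync %p, %zu bytes\n", (void *)addr, sizeof(*addr));
}

int main(void)
{
	struct rx_desc d = { 0 };
	int not_a_desc = 0;

	SYNC_DESC_MACRO(&d);
	SYNC_DESC_MACRO(&not_a_desc);	/* wrong pointer type, compiles anyway */
	sync_desc_func(&d);
	/* sync_desc_func(&not_a_desc); would draw an incompatible-pointer diagnostic */
	return 0;
}

Note that the driver's new helpers keep using sizeof(struct sgiseeq_rx_desc) for both rx and tx descriptors, as the old macros did; this apparently relies on the two descriptor layouts having the same size.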

2 files changed (+35 -31 lines)

drivers/net/Kconfig (+1 -1)

@@ -1797,7 +1797,7 @@ config DE620
 
 config SGISEEQ
 	tristate "SGI Seeq ethernet controller support"
-	depends on SGI_IP22
+	depends on SGI_HAS_SEEQ
 	help
 	  Say Y here if you have an Seeq based Ethernet network card. This is
 	  used in many Silicon Graphics machines.
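SGI_HAS_SEEQ is expected to come from the platform Kconfig, so every machine with a Seeq part selects it instead of the driver hard-coding one board. A sketch of the arch-side wiring this change assumes (illustrative, not part of this diff):

# arch/mips/Kconfig (assumed sketch)
config SGI_HAS_SEEQ
	bool

config SGI_IP22
	...
	select SGI_HAS_SEEQ

config SGI_IP28
	...
	select SGI_HAS_SEEQ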

drivers/net/sgiseeq.c (+34 -30)
@@ -56,14 +56,6 @@ static char *sgiseeqstr = "SGI Seeq8003";
 			  (dma_addr_t)((unsigned long)(v) -            \
 				       (unsigned long)((sp)->rx_desc)))
 
-#define DMA_SYNC_DESC_CPU(dev, addr) \
-	do { dma_cache_sync((dev)->dev.parent, (void *)addr, \
-	     sizeof(struct sgiseeq_rx_desc), DMA_FROM_DEVICE); } while (0)
-
-#define DMA_SYNC_DESC_DEV(dev, addr) \
-	do { dma_cache_sync((dev)->dev.parent, (void *)addr, \
-	     sizeof(struct sgiseeq_rx_desc), DMA_TO_DEVICE); } while (0)
-
 /* Copy frames shorter than rx_copybreak, otherwise pass on up in
  * a full sized sk_buff. Value of 100 stolen from tulip.c (!alpha).
  */
@@ -116,6 +108,18 @@ struct sgiseeq_private {
 	spinlock_t tx_lock;
 };
 
+static inline void dma_sync_desc_cpu(struct net_device *dev, void *addr)
+{
+	dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc),
+		       DMA_FROM_DEVICE);
+}
+
+static inline void dma_sync_desc_dev(struct net_device *dev, void *addr)
+{
+	dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc),
+		       DMA_TO_DEVICE);
+}
+
 static inline void hpc3_eth_reset(struct hpc3_ethregs *hregs)
 {
 	hregs->reset = HPC3_ERST_CRESET | HPC3_ERST_CLRIRQ;
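These helpers exist because on IP28 the descriptor rings sit in cached memory: before the CPU reads a descriptor the device may have written, the covering cache lines must be discarded, and after the CPU updates one, they must be written back before the device looks at it. A compilable userspace model of that discipline (stub types and assumed names; dma_cache_sync's real work is done by the arch cache code):

#include <stdio.h>

struct desc { unsigned int cntinfo; };

enum sync_dir { TO_DEVICE, FROM_DEVICE };

/* Stand-in for dma_cache_sync(): a real MIPS implementation would
 * invalidate (FROM_DEVICE) or write back (TO_DEVICE) the cache lines
 * covering the descriptor. */
static void cache_sync(struct desc *d, enum sync_dir dir)
{
	printf("%s %p\n",
	       dir == FROM_DEVICE ? "invalidate" : "writeback", (void *)d);
}

static inline void sync_desc_cpu(struct desc *d) { cache_sync(d, FROM_DEVICE); }
static inline void sync_desc_dev(struct desc *d) { cache_sync(d, TO_DEVICE); }

int main(void)
{
	struct desc ring[4] = { { 0 } };

	sync_desc_cpu(&ring[0]);		/* about to read device-owned state */
	if (!(ring[0].cntinfo & 0x80000000u)) {	/* OWN bit clear: CPU's turn    */
		ring[0].cntinfo = 0x80000000u;	/* hand it back to the device   */
		sync_desc_dev(&ring[0]);	/* flush the update out of cache */
	}
	return 0;
}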
@@ -184,7 +188,7 @@ static int seeq_init_ring(struct net_device *dev)
 	/* Setup tx ring. */
 	for(i = 0; i < SEEQ_TX_BUFFERS; i++) {
 		sp->tx_desc[i].tdma.cntinfo = TCNTINFO_INIT;
-		DMA_SYNC_DESC_DEV(dev, &sp->tx_desc[i]);
+		dma_sync_desc_dev(dev, &sp->tx_desc[i]);
 	}
 
 	/* And now the rx ring. */
@@ -203,10 +207,10 @@ static int seeq_init_ring(struct net_device *dev)
 			sp->rx_desc[i].rdma.pbuf = dma_addr;
 		}
 		sp->rx_desc[i].rdma.cntinfo = RCNTINFO_INIT;
-		DMA_SYNC_DESC_DEV(dev, &sp->rx_desc[i]);
+		dma_sync_desc_dev(dev, &sp->rx_desc[i]);
 	}
 	sp->rx_desc[i - 1].rdma.cntinfo |= HPCDMA_EOR;
-	DMA_SYNC_DESC_DEV(dev, &sp->rx_desc[i - 1]);
+	dma_sync_desc_dev(dev, &sp->rx_desc[i - 1]);
 	return 0;
 }

@@ -341,7 +345,7 @@ static inline void sgiseeq_rx(struct net_device *dev, struct sgiseeq_private *sp
 
 	/* Service every received packet. */
 	rd = &sp->rx_desc[sp->rx_new];
-	DMA_SYNC_DESC_CPU(dev, rd);
+	dma_sync_desc_cpu(dev, rd);
 	while (!(rd->rdma.cntinfo & HPCDMA_OWN)) {
 		len = PKT_BUF_SZ - (rd->rdma.cntinfo & HPCDMA_BCNT) - 3;
 		dma_unmap_single(dev->dev.parent, rd->rdma.pbuf,
@@ -397,16 +401,16 @@ static inline void sgiseeq_rx(struct net_device *dev, struct sgiseeq_private *sp
 		/* Return the entry to the ring pool. */
 		rd->rdma.cntinfo = RCNTINFO_INIT;
 		sp->rx_new = NEXT_RX(sp->rx_new);
-		DMA_SYNC_DESC_DEV(dev, rd);
+		dma_sync_desc_dev(dev, rd);
 		rd = &sp->rx_desc[sp->rx_new];
-		DMA_SYNC_DESC_CPU(dev, rd);
+		dma_sync_desc_cpu(dev, rd);
 	}
-	DMA_SYNC_DESC_CPU(dev, &sp->rx_desc[orig_end]);
+	dma_sync_desc_cpu(dev, &sp->rx_desc[orig_end]);
 	sp->rx_desc[orig_end].rdma.cntinfo &= ~(HPCDMA_EOR);
-	DMA_SYNC_DESC_DEV(dev, &sp->rx_desc[orig_end]);
-	DMA_SYNC_DESC_CPU(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]);
+	dma_sync_desc_dev(dev, &sp->rx_desc[orig_end]);
+	dma_sync_desc_cpu(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]);
 	sp->rx_desc[PREV_RX(sp->rx_new)].rdma.cntinfo |= HPCDMA_EOR;
-	DMA_SYNC_DESC_DEV(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]);
+	dma_sync_desc_dev(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]);
 	rx_maybe_restart(sp, hregs, sregs);
 }
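The hunk above is the subtle one: as sp->rx_new advances, the HPCDMA_EOR (end-of-ring) marker has to be cleared on the old tail descriptor and set on the new one, with a CPU sync before each read-modify-write and a device sync after it. A self-contained simulation of just the marker movement (cache syncs omitted, constants assumed):

#include <stdio.h>

#define RING_SIZE	4
#define EOR		0x1u			/* stand-in for HPCDMA_EOR */
#define NEXT_RX(i)	(((i) + 1) & (RING_SIZE - 1))
#define PREV_RX(i)	(((i) - 1) & (RING_SIZE - 1))

static unsigned int ring[RING_SIZE];

/* Move the end-of-ring marker so it always sits just behind rx_new. */
static void move_eor(int orig_end, int rx_new)
{
	ring[orig_end] &= ~EOR;		/* old tail no longer ends the ring */
	ring[PREV_RX(rx_new)] |= EOR;	/* descriptor before rx_new does   */
}

int main(void)
{
	int rx_new = 0, orig_end = PREV_RX(rx_new);

	ring[orig_end] |= EOR;			/* initial ring tail     */
	rx_new = NEXT_RX(NEXT_RX(rx_new));	/* two packets consumed  */
	move_eor(orig_end, rx_new);

	for (int i = 0; i < RING_SIZE; i++)
		printf("desc %d: %s\n", i, (ring[i] & EOR) ? "EOR" : "-");
	return 0;
}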

@@ -433,12 +437,12 @@ static inline void kick_tx(struct net_device *dev,
 	 * is not active!
 	 */
 	td = &sp->tx_desc[i];
-	DMA_SYNC_DESC_CPU(dev, td);
+	dma_sync_desc_cpu(dev, td);
 	while ((td->tdma.cntinfo & (HPCDMA_XIU | HPCDMA_ETXD)) ==
 	      (HPCDMA_XIU | HPCDMA_ETXD)) {
 		i = NEXT_TX(i);
 		td = &sp->tx_desc[i];
-		DMA_SYNC_DESC_CPU(dev, td);
+		dma_sync_desc_cpu(dev, td);
 	}
 	if (td->tdma.cntinfo & HPCDMA_XIU) {
 		hregs->tx_ndptr = VIRT_TO_DMA(sp, td);
@@ -470,7 +474,7 @@ static inline void sgiseeq_tx(struct net_device *dev, struct sgiseeq_private *sp
 	for (j = sp->tx_old; j != sp->tx_new; j = NEXT_TX(j)) {
 		td = &sp->tx_desc[j];
 
-		DMA_SYNC_DESC_CPU(dev, td);
+		dma_sync_desc_cpu(dev, td);
 		if (!(td->tdma.cntinfo & (HPCDMA_XIU)))
 			break;
 		if (!(td->tdma.cntinfo & (HPCDMA_ETXD))) {
@@ -488,7 +492,7 @@ static inline void sgiseeq_tx(struct net_device *dev, struct sgiseeq_private *sp
 			dev_kfree_skb_any(td->skb);
 			td->skb = NULL;
 		}
-		DMA_SYNC_DESC_DEV(dev, td);
+		dma_sync_desc_dev(dev, td);
 	}
 }

@@ -598,7 +602,7 @@ static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	dev->stats.tx_bytes += len;
 	entry = sp->tx_new;
 	td = &sp->tx_desc[entry];
-	DMA_SYNC_DESC_CPU(dev, td);
+	dma_sync_desc_cpu(dev, td);
 
 	/* Create entry. There are so many races with adding a new
 	 * descriptor to the chain:
@@ -618,14 +622,14 @@ static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			       len, DMA_TO_DEVICE);
 	td->tdma.cntinfo = (len & HPCDMA_BCNT) |
 			   HPCDMA_XIU | HPCDMA_EOXP | HPCDMA_XIE | HPCDMA_EOX;
-	DMA_SYNC_DESC_DEV(dev, td);
+	dma_sync_desc_dev(dev, td);
 	if (sp->tx_old != sp->tx_new) {
 		struct sgiseeq_tx_desc *backend;
 
 		backend = &sp->tx_desc[PREV_TX(sp->tx_new)];
-		DMA_SYNC_DESC_CPU(dev, backend);
+		dma_sync_desc_cpu(dev, backend);
 		backend->tdma.cntinfo &= ~HPCDMA_EOX;
-		DMA_SYNC_DESC_DEV(dev, backend);
+		dma_sync_desc_dev(dev, backend);
 	}
 	sp->tx_new = NEXT_TX(sp->tx_new); /* Advance. */

@@ -681,11 +685,11 @@ static inline void setup_tx_ring(struct net_device *dev,
 	while (i < (nbufs - 1)) {
 		buf[i].tdma.pnext = VIRT_TO_DMA(sp, buf + i + 1);
 		buf[i].tdma.pbuf = 0;
-		DMA_SYNC_DESC_DEV(dev, &buf[i]);
+		dma_sync_desc_dev(dev, &buf[i]);
 		i++;
 	}
 	buf[i].tdma.pnext = VIRT_TO_DMA(sp, buf);
-	DMA_SYNC_DESC_DEV(dev, &buf[i]);
+	dma_sync_desc_dev(dev, &buf[i]);
 }
 
 static inline void setup_rx_ring(struct net_device *dev,
@@ -698,12 +702,12 @@
 	while (i < (nbufs - 1)) {
 		buf[i].rdma.pnext = VIRT_TO_DMA(sp, buf + i + 1);
 		buf[i].rdma.pbuf = 0;
-		DMA_SYNC_DESC_DEV(dev, &buf[i]);
+		dma_sync_desc_dev(dev, &buf[i]);
 		i++;
 	}
 	buf[i].rdma.pbuf = 0;
 	buf[i].rdma.pnext = VIRT_TO_DMA(sp, buf);
-	DMA_SYNC_DESC_DEV(dev, &buf[i]);
+	dma_sync_desc_dev(dev, &buf[i]);
 }
 
 static int __init sgiseeq_probe(struct platform_device *pdev)
