Skip to content

Commit

Permalink
Remove dma_cache_(wback|inv|wback_inv) functions
Browse files Browse the repository at this point in the history
dma_cache_(wback|inv|wback_inv) were the earliest attempt at a generalized
cache management API for I/O purposes.  Originally it was basically the raw
MIPS low level cache API exported to the entire world.  The API has
suffered from a lack of documentation, was not very widely used unlike its
more modern brothers and can easily be replaced by dma_cache_sync.  So
remove it, resp.  turn the surviving bits back into an arch private API, as
discussed on linux-arch.

Signed-off-by: Ralf Baechle <[email protected]>
Acked-by: Paul Mundt <[email protected]>
Acked-by: Paul Mackerras <[email protected]>
Acked-by: David S. Miller <[email protected]>
Acked-by: Kyle McMartin <[email protected]>
Acked-by: Haavard Skinnemoen <[email protected]>
Cc: <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
  • Loading branch information
ralfbaechle authored and Linus Torvalds committed Oct 17, 2007
1 parent bc154b1 commit 622a9ed
Show file tree
Hide file tree
Showing 24 changed files with 19 additions and 193 deletions.
6 changes: 3 additions & 3 deletions arch/avr32/mm/dma-coherent.c
Original file line number Diff line number Diff line change
Expand Up @@ -21,13 +21,13 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size, int direction)

switch (direction) {
case DMA_FROM_DEVICE: /* invalidate only */
dma_cache_inv(vaddr, size);
invalidate_dcache_region(vaddr, size);
break;
case DMA_TO_DEVICE: /* writeback only */
dma_cache_wback(vaddr, size);
clean_dcache_region(vaddr, size);
break;
case DMA_BIDIRECTIONAL: /* writeback and invalidate */
dma_cache_wback_inv(vaddr, size);
flush_dcache_region(vaddr, size);
break;
default:
BUG();
Expand Down
2 changes: 0 additions & 2 deletions arch/mips/mm/cache.c
Original file line number Diff line number Diff line change
Expand Up @@ -47,8 +47,6 @@ void (*_dma_cache_wback)(unsigned long start, unsigned long size);
void (*_dma_cache_inv)(unsigned long start, unsigned long size);

EXPORT_SYMBOL(_dma_cache_wback_inv);
EXPORT_SYMBOL(_dma_cache_wback);
EXPORT_SYMBOL(_dma_cache_inv);

#endif /* CONFIG_DMA_NONCOHERENT */

Expand Down
2 changes: 1 addition & 1 deletion arch/sh/drivers/pci/dma-dreamcast.c
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,7 @@ void *dreamcast_consistent_alloc(struct device *dev, size_t size,
buf = P2SEGADDR(buf);

/* Flush the dcache before we hand off the buffer */
dma_cache_wback_inv((void *)buf, size);
__flush_purge_region((void *)buf, size);

return (void *)buf;
}
Expand Down
8 changes: 4 additions & 4 deletions arch/sh/mm/consistent.c
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *handle)
/*
* We must flush the cache before we pass it on to the device
*/
dma_cache_wback_inv(ret, size);
__flush_purge_region(ret, size);

page = virt_to_page(ret);
free = page + (size >> PAGE_SHIFT);
Expand Down Expand Up @@ -68,13 +68,13 @@ void consistent_sync(void *vaddr, size_t size, int direction)

switch (direction) {
case DMA_FROM_DEVICE: /* invalidate only */
dma_cache_inv(p1addr, size);
__flush_invalidate_region(p1addr, size);
break;
case DMA_TO_DEVICE: /* writeback only */
dma_cache_wback(p1addr, size);
__flush_wback_region(p1addr, size);
break;
case DMA_BIDIRECTIONAL: /* writeback and invalidate */
dma_cache_wback_inv(p1addr, size);
__flush_purge_region(p1addr, size);
break;
default:
BUG();
Expand Down
3 changes: 2 additions & 1 deletion arch/sh64/mm/consistent.c
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <asm/io.h>

Expand All @@ -32,7 +33,7 @@ void *consistent_alloc(struct pci_dev *hwdev, size_t size,
if (vp != NULL) {
memset(vp, 0, size);
*dma_handle = virt_to_phys(ret);
dma_cache_wback_inv((unsigned long)ret, size);
dma_cache_sync(NULL, ret, size, DMA_BIDIRECTIONAL);
}

return vp;
Expand Down
6 changes: 0 additions & 6 deletions include/asm-alpha/io.h
Original file line number Diff line number Diff line change
Expand Up @@ -551,12 +551,6 @@ extern void outsl (unsigned long port, const void *src, unsigned long count);
#endif
#define RTC_ALWAYS_BCD 0

/* Nothing to do */

#define dma_cache_inv(_start,_size) do { } while (0)
#define dma_cache_wback(_start,_size) do { } while (0)
#define dma_cache_wback_inv(_start,_size) do { } while (0)

/*
* Some mucking forons use if[n]def writeq to check if platform has it.
* It's a bloody bad idea and we probably want ARCH_HAS_WRITEQ for them
Expand Down
7 changes: 0 additions & 7 deletions include/asm-avr32/io.h
Original file line number Diff line number Diff line change
Expand Up @@ -298,13 +298,6 @@ extern void __iounmap(void __iomem *addr);
#define ioport_map(port, nr) ioremap(port, nr)
#define ioport_unmap(port) iounmap(port)

#define dma_cache_wback_inv(_start, _size) \
flush_dcache_region(_start, _size)
#define dma_cache_inv(_start, _size) \
invalidate_dcache_region(_start, _size)
#define dma_cache_wback(_start, _size) \
clean_dcache_region(_start, _size)

/*
* Convert a physical pointer to a virtual kernel pointer for /dev/mem
* access
Expand Down
4 changes: 0 additions & 4 deletions include/asm-blackfin/io.h
Original file line number Diff line number Diff line change
Expand Up @@ -183,10 +183,6 @@ extern void blkfin_inv_cache_all(void);
#define ioport_map(port, nr) ((void __iomem*)(port))
#define ioport_unmap(addr)

#define dma_cache_inv(_start,_size) do { blkfin_inv_cache_all();} while (0)
#define dma_cache_wback(_start,_size) do { } while (0)
#define dma_cache_wback_inv(_start,_size) do { blkfin_inv_cache_all();} while (0)

/* Pages to physical address... */
#define page_to_phys(page) ((page - mem_map) << PAGE_SHIFT)
#define page_to_bus(page) ((page - mem_map) << PAGE_SHIFT)
Expand Down
6 changes: 0 additions & 6 deletions include/asm-h8300/io.h
Original file line number Diff line number Diff line change
Expand Up @@ -264,12 +264,6 @@ static inline void *ioremap_fullcache(unsigned long physaddr, unsigned long size

extern void iounmap(void *addr);

/* Nothing to do */

#define dma_cache_inv(_start,_size) do { } while (0)
#define dma_cache_wback(_start,_size) do { } while (0)
#define dma_cache_wback_inv(_start,_size) do { } while (0)

/* H8/300 internal I/O functions */
static __inline__ unsigned char ctrl_inb(unsigned long addr)
{
Expand Down
4 changes: 0 additions & 4 deletions include/asm-ia64/io.h
Original file line number Diff line number Diff line change
Expand Up @@ -435,10 +435,6 @@ extern void memcpy_fromio(void *dst, const volatile void __iomem *src, long n);
extern void memcpy_toio(volatile void __iomem *dst, const void *src, long n);
extern void memset_io(volatile void __iomem *s, int c, long n);

#define dma_cache_inv(_start,_size) do { } while (0)
#define dma_cache_wback(_start,_size) do { } while (0)
#define dma_cache_wback_inv(_start,_size) do { } while (0)

# endif /* __KERNEL__ */

/*
Expand Down
6 changes: 0 additions & 6 deletions include/asm-m68k/io.h
Original file line number Diff line number Diff line change
Expand Up @@ -384,12 +384,6 @@ static inline void __iomem *ioremap_fullcache(unsigned long physaddr,
return __ioremap(physaddr, size, IOMAP_FULL_CACHING);
}


/* m68k caches aren't DMA coherent */
extern void dma_cache_wback_inv(unsigned long start, unsigned long size);
extern void dma_cache_wback(unsigned long start, unsigned long size);
extern void dma_cache_inv(unsigned long start, unsigned long size);

static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count)
{
__builtin_memset((void __force *) addr, val, count);
Expand Down
6 changes: 0 additions & 6 deletions include/asm-m68knommu/io.h
Original file line number Diff line number Diff line change
Expand Up @@ -165,12 +165,6 @@ static inline void *ioremap_fullcache(unsigned long physaddr, unsigned long size

extern void iounmap(void *addr);

/* Nothing to do */

#define dma_cache_inv(_start,_size) do { } while (0)
#define dma_cache_wback(_start,_size) do { } while (0)
#define dma_cache_wback_inv(_start,_size) do { } while (0)

/* Pages to physical address... */
#define page_to_phys(page) ((page - mem_map) << PAGE_SHIFT)
#define page_to_bus(page) ((page - mem_map) << PAGE_SHIFT)
Expand Down
2 changes: 2 additions & 0 deletions include/asm-mips/io.h
Original file line number Diff line number Diff line change
Expand Up @@ -554,6 +554,8 @@ static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int
* caches. Dirty lines of the caches may be written back or simply
* be discarded. This operation is necessary before dma operations
* to the memory.
*
* This API used to be exported; it now is for arch code internal use only.
*/
#ifdef CONFIG_DMA_NONCOHERENT

Expand Down
5 changes: 0 additions & 5 deletions include/asm-parisc/io.h
Original file line number Diff line number Diff line change
Expand Up @@ -270,11 +270,6 @@ extern void outsl (unsigned long port, const void *src, unsigned long count);
/* IO Port space is : BBiiii where BB is HBA number. */
#define IO_SPACE_LIMIT 0x00ffffff


#define dma_cache_inv(_start,_size) do { flush_kernel_dcache_range(_start,_size); } while (0)
#define dma_cache_wback(_start,_size) do { flush_kernel_dcache_range(_start,_size); } while (0)
#define dma_cache_wback_inv(_start,_size) do { flush_kernel_dcache_range(_start,_size); } while (0)

/* PA machines have an MM I/O space from 0xf0000000-0xffffffff in 32
* bit mode and from 0xfffffffff0000000-0xfffffffffffffff in 64 bit
* mode (essentially just sign extending. This macro takes in a 32
Expand Down
17 changes: 0 additions & 17 deletions include/asm-powerpc/io.h
Original file line number Diff line number Diff line change
Expand Up @@ -498,23 +498,6 @@ static inline void name at \
#define writeq writeq
#endif

#ifdef CONFIG_NOT_COHERENT_CACHE

#define dma_cache_inv(_start,_size) \
invalidate_dcache_range(_start, (_start + _size))
#define dma_cache_wback(_start,_size) \
clean_dcache_range(_start, (_start + _size))
#define dma_cache_wback_inv(_start,_size) \
flush_dcache_range(_start, (_start + _size))

#else /* CONFIG_NOT_COHERENT_CACHE */

#define dma_cache_inv(_start,_size) do { } while (0)
#define dma_cache_wback(_start,_size) do { } while (0)
#define dma_cache_wback_inv(_start,_size) do { } while (0)

#endif /* !CONFIG_NOT_COHERENT_CACHE */

/*
* Convert a physical pointer to a virtual kernel pointer for /dev/mem
* access
Expand Down
17 changes: 0 additions & 17 deletions include/asm-ppc/io.h
Original file line number Diff line number Diff line change
Expand Up @@ -478,23 +478,6 @@ extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
#include <asm/mpc8260_pci9.h>
#endif

#ifdef CONFIG_NOT_COHERENT_CACHE

#define dma_cache_inv(_start,_size) \
invalidate_dcache_range(_start, (_start + _size))
#define dma_cache_wback(_start,_size) \
clean_dcache_range(_start, (_start + _size))
#define dma_cache_wback_inv(_start,_size) \
flush_dcache_range(_start, (_start + _size))

#else

#define dma_cache_inv(_start,_size) do { } while (0)
#define dma_cache_wback(_start,_size) do { } while (0)
#define dma_cache_wback_inv(_start,_size) do { } while (0)

#endif

/*
* Convert a physical pointer to a virtual kernel pointer for /dev/mem
* access
Expand Down
2 changes: 1 addition & 1 deletion include/asm-sh/floppy.h
Original file line number Diff line number Diff line change
Expand Up @@ -213,7 +213,7 @@ static int hard_dma_setup(char *addr, unsigned long size, int mode, int io)
}
#endif

dma_cache_wback_inv(addr, size);
__flush_purge_region(addr, size);

/* actual, physical DMA */
doing_pdma = 0;
Expand Down
25 changes: 0 additions & 25 deletions include/asm-sh/io.h
Original file line number Diff line number Diff line change
Expand Up @@ -326,31 +326,6 @@ __ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags)
#define iounmap(addr) \
__iounmap((addr))

/*
* The caches on some architectures aren't dma-coherent and have need to
* handle this in software. There are three types of operations that
* can be applied to dma buffers.
*
* - dma_cache_wback_inv(start, size) makes caches and RAM coherent by
* writing the content of the caches back to memory, if necessary.
* The function also invalidates the affected part of the caches as
* necessary before DMA transfers from outside to memory.
* - dma_cache_inv(start, size) invalidates the affected parts of the
* caches. Dirty lines of the caches may be written back or simply
* be discarded. This operation is necessary before dma operations
* to the memory.
* - dma_cache_wback(start, size) writes back any dirty lines but does
* not invalidate the cache. This can be used before DMA reads from
* memory.
*/

#define dma_cache_wback_inv(_start,_size) \
__flush_purge_region(_start,_size)
#define dma_cache_inv(_start,_size) \
__flush_invalidate_region(_start,_size)
#define dma_cache_wback(_start,_size) \
__flush_wback_region(_start,_size)

/*
* Convert a physical pointer to a virtual kernel pointer for /dev/mem
* access
Expand Down
6 changes: 5 additions & 1 deletion include/asm-sh64/dma-mapping.h
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,11 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
/*
 * dma_cache_sync - make the buffer [vaddr, vaddr + size) coherent with RAM
 * around a DMA transfer by purging ("ocbp": write back, then invalidate)
 * every cache line the buffer touches.
 *
 * @dev:   device the transfer is for (unused here)
 * @vaddr: kernel virtual start address of the buffer
 * @size:  length of the buffer in bytes
 * @dir:   DMA direction; currently ignored, since a purge is the strongest
 *         of the three operations and covers every direction
 */
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
				  enum dma_data_direction dir)
{
	/*
	 * Cast before doing arithmetic: "vaddr + size" would be arithmetic
	 * on void *, and masking a pointer with '&' is not valid C.  Both
	 * bounds are reduced to cache-line addresses.
	 */
	unsigned long s = (unsigned long) vaddr & L1_CACHE_ALIGN_MASK;
	unsigned long e = ((unsigned long) vaddr + size) & L1_CACHE_ALIGN_MASK;

	/* Inclusive of e so a partial trailing line is purged as well. */
	for (; s <= e; s += L1_CACHE_BYTES)
		asm volatile ("ocbp %0, 0" : : "r" (s));
}

static inline dma_addr_t dma_map_single(struct device *dev,
Expand Down
48 changes: 0 additions & 48 deletions include/asm-sh64/io.h
Original file line number Diff line number Diff line change
Expand Up @@ -181,54 +181,6 @@ extern void iounmap(void *addr);
unsigned long onchip_remap(unsigned long addr, unsigned long size, const char* name);
extern void onchip_unmap(unsigned long vaddr);

/*
* The caches on some architectures aren't dma-coherent and have need to
* handle this in software. There are three types of operations that
* can be applied to dma buffers.
*
* - dma_cache_wback_inv(start, size) makes caches and RAM coherent by
* writing the content of the caches back to memory, if necessary.
* The function also invalidates the affected part of the caches as
* necessary before DMA transfers from outside to memory.
* - dma_cache_inv(start, size) invalidates the affected parts of the
* caches. Dirty lines of the caches may be written back or simply
* be discarded. This operation is necessary before dma operations
* to the memory.
* - dma_cache_wback(start, size) writes back any dirty lines but does
* not invalidate the cache. This can be used before DMA reads from
* memory.
*/

/*
 * Write back and then invalidate ("purge", ocbp) every cache line that
 * overlaps [start, start + size).  The loop bound is inclusive so a
 * partially covered trailing line is purged too.
 */
static __inline__ void dma_cache_wback_inv (unsigned long start, unsigned long size)
{
	unsigned long line = start & L1_CACHE_ALIGN_MASK;
	unsigned long last = (start + size) & L1_CACHE_ALIGN_MASK;

	while (line <= last) {
		asm volatile ("ocbp %0, 0" : : "r" (line));
		line += L1_CACHE_BYTES;
	}
}

/*
 * Invalidate (discard, ocbi) every cache line overlapping
 * [start, start + size); dirty data in those lines is lost, not written
 * back.  Used before a device DMAs into the buffer.
 */
static __inline__ void dma_cache_inv (unsigned long start, unsigned long size)
{
// Note that caller has to be careful with overzealous
// invalidation should there be partial cache lines at the extremities
// of the specified range
unsigned long s = start & L1_CACHE_ALIGN_MASK;
/* Loop below is inclusive of e, so when start + size is exactly
   line-aligned one line beyond the range is invalidated as well --
   that is the over-invalidation the note above warns about. */
unsigned long e = (start + size) & L1_CACHE_ALIGN_MASK;

for (; s <= e; s += L1_CACHE_BYTES)
asm volatile ("ocbi %0, 0" : : "r" (s));
}

/*
 * Write back (clean, ocbwb) every cache line overlapping
 * [start, start + size) without invalidating it; the loop bound is
 * inclusive so a partially covered trailing line is written back too.
 */
static __inline__ void dma_cache_wback (unsigned long start, unsigned long size)
{
	unsigned long line = start & L1_CACHE_ALIGN_MASK;
	unsigned long last = (start + size) & L1_CACHE_ALIGN_MASK;

	while (line <= last) {
		asm volatile ("ocbwb %0, 0" : : "r" (line));
		line += L1_CACHE_BYTES;
	}
}

/*
* Convert a physical pointer to a virtual kernel pointer for /dev/mem
* access
Expand Down
7 changes: 0 additions & 7 deletions include/asm-sparc/io.h
Original file line number Diff line number Diff line change
Expand Up @@ -310,13 +310,6 @@ extern void sbus_iounmap(volatile void __iomem *vaddr, unsigned long size);
#define RTC_PORT(x) (rtc_port + (x))
#define RTC_ALWAYS_BCD 0

/* Nothing to do */
/* P3: Only IDE DMA may need these. XXX Verify that it still does... */

#define dma_cache_inv(_start,_size) do { } while (0)
#define dma_cache_wback(_start,_size) do { } while (0)
#define dma_cache_wback_inv(_start,_size) do { } while (0)

#endif

#define __ARCH_HAS_NO_PAGE_ZERO_MAPPED 1
Expand Down
6 changes: 0 additions & 6 deletions include/asm-sparc64/io.h
Original file line number Diff line number Diff line change
Expand Up @@ -474,12 +474,6 @@ extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
#define sbus_iounmap(__addr, __size) \
release_region((unsigned long)(__addr), (__size))

/* Nothing to do */

#define dma_cache_inv(_start,_size) do { } while (0)
#define dma_cache_wback(_start,_size) do { } while (0)
#define dma_cache_wback_inv(_start,_size) do { } while (0)

/*
* Convert a physical pointer to a virtual kernel pointer for /dev/mem
* access
Expand Down
11 changes: 1 addition & 10 deletions include/asm-x86/io_32.h
Original file line number Diff line number Diff line change
Expand Up @@ -237,18 +237,9 @@ static inline void flush_write_buffers(void)
__asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory");
}

#define dma_cache_inv(_start,_size) flush_write_buffers()
#define dma_cache_wback(_start,_size) flush_write_buffers()
#define dma_cache_wback_inv(_start,_size) flush_write_buffers()

#else

/* Nothing to do */

#define dma_cache_inv(_start,_size) do { } while (0)
#define dma_cache_wback(_start,_size) do { } while (0)
#define dma_cache_wback_inv(_start,_size) do { } while (0)
#define flush_write_buffers()
#define flush_write_buffers() do { } while (0)

#endif

Expand Down
Loading

0 comments on commit 622a9ed

Please sign in to comment.