unicore32 core architecture: mm related: consistent device DMA handling
This patch implements consistent device DMA handling for memory management.
The DMA device operations are also included here.

Signed-off-by: Guan Xuetao <[email protected]>
Reviewed-by: Arnd Bergmann <[email protected]>
gxt committed Mar 17, 2011
1 parent 56372b0 commit 10c9c10
Showing 10 changed files with 1,207 additions and 0 deletions.
211 changes: 211 additions & 0 deletions arch/unicore32/include/asm/cacheflush.h
@@ -0,0 +1,211 @@
/*
 * linux/arch/unicore32/include/asm/cacheflush.h
 *
 * Code specific to PKUnity SoC and UniCore ISA
 *
 * Copyright (C) 2001-2010 GUAN Xue-tao
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __UNICORE_CACHEFLUSH_H__
#define __UNICORE_CACHEFLUSH_H__

#include <linux/mm.h>

#include <asm/shmparam.h>

#define CACHE_COLOUR(vaddr) ((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
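/*
 * Illustrative note (not in the original commit): assuming SHMLBA is
 * 4 * PAGE_SIZE with 4 KiB pages, CACHE_COLOUR(0x3000) evaluates to
 * (0x3000 & 0x3fff) >> 12 == 3, i.e. the macro selects one of four
 * possible cache colours for a given virtual address.
 */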

/*
 * This flag is used to indicate that the page pointed to by a pte is clean
 * and does not require cleaning before returning it to the user.
 */
#define PG_dcache_clean PG_arch_1

/*
 *    MM Cache Management
 *    ===================
 *
 *    The arch/unicore32/mm/cache.S files implement these methods.
 *
 *    Start addresses are inclusive and end addresses are exclusive;
 *    start addresses should be rounded down, end addresses up.
 *
 *    See Documentation/cachetlb.txt for more information.
 *    Please note that the implementation of these, and the required
 *    effects are cache-type (VIVT/VIPT/PIPT) specific.
 *
 *    flush_icache_all()
 *
 *        Unconditionally clean and invalidate the entire icache.
 *        Currently only needed for cache-v6.S and cache-v7.S, see
 *        __flush_icache_all for the generic implementation.
 *
 *    flush_kern_all()
 *
 *        Unconditionally clean and invalidate the entire cache.
 *
 *    flush_user_all()
 *
 *        Clean and invalidate all user space cache entries
 *        before a change of page tables.
 *
 *    flush_user_range(start, end, flags)
 *
 *        Clean and invalidate a range of cache entries in the
 *        specified address space before a change of page tables.
 *        - start - user start address (inclusive, page aligned)
 *        - end   - user end address   (exclusive, page aligned)
 *        - flags - vma->vm_flags field
 *
 *    coherent_kern_range(start, end)
 *
 *        Ensure coherency between the Icache and the Dcache in the
 *        region described by start, end.  If you have non-snooping
 *        Harvard caches, you need to implement this function.
 *        - start - virtual start address
 *        - end   - virtual end address
 *
 *    coherent_user_range(start, end)
 *
 *        Ensure coherency between the Icache and the Dcache in the
 *        region described by start, end.  If you have non-snooping
 *        Harvard caches, you need to implement this function.
 *        - start - virtual start address
 *        - end   - virtual end address
 *
 *    flush_kern_dcache_area(kaddr, size)
 *
 *        Ensure that the data held in page is written back.
 *        - kaddr - page address
 *        - size  - region size
 *
 *    DMA Cache Coherency
 *    ===================
 *
 *    dma_flush_range(start, end)
 *
 *        Clean and invalidate the specified virtual address range.
 *        - start - virtual start address
 *        - end   - virtual end address
 */

extern void __cpuc_flush_icache_all(void);
extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_user_all(void);
extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
extern void __cpuc_coherent_user_range(unsigned long, unsigned long);
extern void __cpuc_flush_dcache_area(void *, size_t);
extern void __cpuc_flush_kern_dcache_area(void *addr, size_t size);

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
extern void __cpuc_dma_clean_range(unsigned long, unsigned long);
extern void __cpuc_dma_flush_range(unsigned long, unsigned long);

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 */
extern void copy_to_user_page(struct vm_area_struct *, struct page *,
	unsigned long, void *, const void *, unsigned long);
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	do {							\
		memcpy(dst, src, len);				\
	} while (0)

/*
 * Convert calls to our calling convention.
 */
/* Invalidate I-cache */
static inline void __flush_icache_all(void)
{
	asm("movc p0.c5, %0, #20;\n"
	    "nop; nop; nop; nop; nop; nop; nop; nop\n"
	    :
	    : "r" (0));
}

#define flush_cache_all() __cpuc_flush_kern_all()

extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end);
extern void flush_cache_page(struct vm_area_struct *vma,
		unsigned long user_addr, unsigned long pfn);

#define flush_cache_dup_mm(mm) flush_cache_mm(mm)

/*
 * flush_cache_user_range is used when we want to ensure that the
 * Harvard caches are synchronised for the user space address range.
 * This is used for the UniCore private sys_cacheflush system call.
 */
#define flush_cache_user_range(vma, start, end) \
	__cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))

/*
 * Perform necessary cache operations to ensure that data previously
 * stored within this range of addresses can be executed by the CPU.
 */
#define flush_icache_range(s, e)	__cpuc_coherent_kern_range(s, e)

/*
 * Perform necessary cache operations to ensure that the TLB will
 * see data written in the specified area.
 */
#define clean_dcache_area(start, size)	cpu_dcache_clean_area(start, size)

/*
 * flush_dcache_page is used when the kernel has written to the page
 * cache page at virtual address page->virtual.
 *
 * If this page isn't mapped (ie, page_mapping == NULL), or it might
 * have userspace mappings, then we _must_ always clean + invalidate
 * the dcache entries associated with the kernel mapping.
 *
 * Otherwise we can defer the operation, and clean the cache when we are
 * about to change to user space.  This is the same method as used on SPARC64.
 * See update_mmu_cache for the user space part.
 */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);

#define flush_dcache_mmap_lock(mapping) \
	spin_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) \
	spin_unlock_irq(&(mapping)->tree_lock)

#define flush_icache_user_range(vma, page, addr, len) \
	flush_dcache_page(page)

/*
 * We don't appear to need to do anything here.  In fact, if we did, we'd
 * duplicate cache flushing elsewhere performed by flush_dcache_page().
 */
#define flush_icache_page(vma, page) do { } while (0)

/*
 * flush_cache_vmap() is used when creating mappings (eg, via vmap,
 * vmalloc, ioremap etc) in kernel space for pages.  On non-VIPT
 * caches, since the direct-mappings of these pages may contain cached
 * data, we need to do a full cache flush to ensure that writebacks
 * don't corrupt data placed into these pages via the new mappings.
 */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
}

static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
}

#endif
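
For orientation, here is a hedged usage sketch (not part of this commit) of how kernel code that writes instructions at runtime might use this header; the function and buffer names are hypothetical:

#include <linux/kernel.h>
#include <asm/cacheflush.h>

/* Hypothetical helper: 'code'/'len' describe instructions the kernel has
 * just written (e.g. a trampoline).  On a Harvard cache the D-cache must
 * be cleaned and the I-cache invalidated before execution; the
 * flush_icache_range() macro above maps this onto
 * __cpuc_coherent_kern_range().
 */
static void publish_code(void *code, size_t len)
{
	unsigned long start = (unsigned long)code;

	flush_icache_range(start, start + len);
}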
124 changes: 124 additions & 0 deletions arch/unicore32/include/asm/dma-mapping.h
@@ -0,0 +1,124 @@
/*
 * linux/arch/unicore32/include/asm/dma-mapping.h
 *
 * Code specific to PKUnity SoC and UniCore ISA
 *
 * Copyright (C) 2001-2010 GUAN Xue-tao
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __UNICORE_DMA_MAPPING_H__
#define __UNICORE_DMA_MAPPING_H__

#ifdef __KERNEL__

#include <linux/mm_types.h>
#include <linux/scatterlist.h>
#include <linux/swiotlb.h>

#include <asm-generic/dma-coherent.h>

#include <asm/memory.h>
#include <asm/cacheflush.h>

extern struct dma_map_ops swiotlb_dma_map_ops;

static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
	return &swiotlb_dma_map_ops;
}

static inline int dma_supported(struct device *dev, u64 mask)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	if (unlikely(dma_ops == NULL))
		return 0;

	return dma_ops->dma_supported(dev, mask);
}

static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	if (dma_ops->mapping_error)
		return dma_ops->mapping_error(dev, dma_addr);

	return 0;
}

#include <asm-generic/dma-mapping-common.h>

static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
	if (dev && dev->dma_mask)
		return addr + size - 1 <= *dev->dma_mask;

	return 1;
}

static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return paddr;
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	return daddr;
}

static inline void dma_mark_clean(void *addr, size_t size) {}

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;

	return 0;
}

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *cpu_addr, dma_addr_t dma_handle)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
}

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

static inline void dma_cache_sync(struct device *dev, void *vaddr,
		size_t size, enum dma_data_direction direction)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end = start + size;

	switch (direction) {
	case DMA_NONE:
		BUG();
	case DMA_FROM_DEVICE:
	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
		__cpuc_dma_flush_range(start, end);
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		__cpuc_dma_clean_range(start, end);
		break;
	}
}

#endif /* __KERNEL__ */
#endif
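
As an illustration (again, not part of the commit), a hedged sketch of how a driver might use these helpers; the device pointer, buffer names, and sizes are all hypothetical:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Hypothetical driver fragment: negotiate a 32-bit DMA mask, allocate a
 * coherent descriptor ring, and manually keep a streaming buffer
 * coherent around a device-to-memory transfer via dma_cache_sync().
 */
static int example_dma_setup(struct device *dev, void **ring,
			     dma_addr_t *ring_dma)
{
	if (dma_set_mask(dev, DMA_BIT_MASK(32)))
		return -EIO;

	*ring = dma_alloc_coherent(dev, PAGE_SIZE, ring_dma, GFP_KERNEL);
	if (!*ring)
		return -ENOMEM;

	return 0;
}

static void example_dma_recv_done(struct device *dev, void *buf, size_t len)
{
	/* DMA_FROM_DEVICE: writeback + invalidate, per the switch above. */
	dma_cache_sync(dev, buf, len, DMA_FROM_DEVICE);
}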
23 changes: 23 additions & 0 deletions arch/unicore32/include/asm/dma.h
@@ -0,0 +1,23 @@
/*
 * linux/arch/unicore32/include/asm/dma.h
 *
 * Code specific to PKUnity SoC and UniCore ISA
 *
 * Copyright (C) 2001-2010 GUAN Xue-tao
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __UNICORE_DMA_H__
#define __UNICORE_DMA_H__

#include <asm/memory.h>
#include <asm-generic/dma.h>

#ifdef CONFIG_PCI
extern int isa_dma_bridge_buggy;
#endif

#endif /* __UNICORE_DMA_H__ */