ARM: dma-mapping: remove offset parameter to prepare for generic dma_ops
This patch removes the need for the offset parameter in the dma bounce
functions. This is required to let the dma-mapping framework on the ARM
architecture use the common, generic dma_map_ops based dma-mapping
helpers.

Background and more detailed explanation:

The dma_*_range_* functions have been available since the early days of
the DMA mapping API. They are the correct way of doing partial syncs on a
buffer (usually used by network device drivers). This patch changes only
the internal implementation of the dma bounce functions to let them tunnel
through the dma_map_ops structure. The driver API stays unchanged, so
drivers are still obliged to call the dma_*_range_* functions to keep the
code clean and easy to understand.
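
For illustration only (this example is not part of the patch; the device,
buffer, and length names are made up), a minimal sketch of the partial-sync
pattern the dma_*_range_* API is meant for:

#include <linux/device.h>
#include <linux/dma-mapping.h>

/*
 * Map a whole RX buffer once, then hand only the bytes the device
 * actually wrote back to the CPU before reading them.
 */
static void example_rx(struct device *dev, void *rx_buf,
		       size_t rx_buf_size, size_t pkt_len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, rx_buf, rx_buf_size, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, handle))
		return;

	/* ... the device DMAs pkt_len bytes into the start of rx_buf ... */

	/* partial sync: give only the received bytes back to the CPU */
	dma_sync_single_range_for_cpu(dev, handle, 0, pkt_len,
				      DMA_FROM_DEVICE);

	/* the CPU may now safely read the first pkt_len bytes of rx_buf */

	dma_unmap_single(dev, handle, rx_buf_size, DMA_FROM_DEVICE);
}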

The only drawback of this patch is reduced detection of DMA API abuse.
Let us consider the following code:

dma_addr = dma_map_single(dev, ptr, 64, DMA_TO_DEVICE);
dma_sync_single_range_for_cpu(dev, dma_addr+16, 0, 32, DMA_TO_DEVICE);

Without the patch such code fails, because the dma bounce code is unable
to find the bounce buffer for the given DMA address. After the patch the
above sync call becomes equivalent to:

dma_sync_single_range_for_cpu(dev, dma_addr, 16, 32, DMA_TO_DEVICE);

which succeeds.
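
The reason it succeeds is the containment-based bounce-buffer lookup this
patch introduces in arch/arm/common/dmabounce.c (see the diff below). A
simplified, illustrative sketch of that check (the struct and function
names here are made up; only the fields the lookup needs are shown):

#include <linux/types.h>
#include <linux/dma-mapping.h>

/* stripped-down stand-in for dmabounce's struct safe_buffer */
struct safe_buffer_sketch {
	dma_addr_t safe_dma_addr;	/* bus address of the bounce buffer */
	size_t size;			/* size of the bounce buffer */
};

/*
 * Any address inside [safe_dma_addr, safe_dma_addr + size) now matches,
 * and the offset is recomputed from the match itself: for dma_addr + 16
 * inside a 64-byte buffer this yields *off == 16.
 */
static bool sketch_addr_matches(const struct safe_buffer_sketch *b,
				dma_addr_t addr, unsigned long *off)
{
	if (b->safe_dma_addr <= addr &&
	    b->safe_dma_addr + b->size > addr) {
		*off = addr - b->safe_dma_addr;
		return true;
	}
	return false;
}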

I don't consider this a real problem, because such DMA API abuse should be
caught by the debug_dma_* function family. This patch lets us simplify the
internal low-level implementation without changing the driver-visible API.
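
For reference, the debug_dma_* checks mentioned above are the ones compiled
in when DMA API debugging is enabled in the kernel configuration, e.g.:

CONFIG_DMA_API_DEBUG=y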

Signed-off-by: Marek Szyprowski <[email protected]>
Acked-by: Kyungmin Park <[email protected]>
Tested-By: Subash Patel <[email protected]>
mszyprow committed May 21, 2012
1 parent 553ac78 commit a227fb9
Showing 3 changed files with 45 additions and 39 deletions.
13 changes: 10 additions & 3 deletions arch/arm/common/dmabounce.c
@@ -173,7 +173,8 @@ find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_
 	read_lock_irqsave(&device_info->lock, flags);
 
 	list_for_each_entry(b, &device_info->safe_buffers, node)
-		if (b->safe_dma_addr == safe_dma_addr) {
+		if (b->safe_dma_addr <= safe_dma_addr &&
+		    b->safe_dma_addr + b->size > safe_dma_addr) {
 			rb = b;
 			break;
 		}
@@ -362,9 +363,10 @@ void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
 EXPORT_SYMBOL(__dma_unmap_page);
 
 int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
-		unsigned long off, size_t sz, enum dma_data_direction dir)
+		size_t sz, enum dma_data_direction dir)
 {
 	struct safe_buffer *buf;
+	unsigned long off;
 
 	dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
 		__func__, addr, off, sz, dir);
@@ -373,6 +375,8 @@ int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
 	if (!buf)
 		return 1;
 
+	off = addr - buf->safe_dma_addr;
+
 	BUG_ON(buf->direction != dir);
 
 	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
@@ -391,9 +395,10 @@ int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
 EXPORT_SYMBOL(dmabounce_sync_for_cpu);
 
 int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
-		unsigned long off, size_t sz, enum dma_data_direction dir)
+		size_t sz, enum dma_data_direction dir)
 {
 	struct safe_buffer *buf;
+	unsigned long off;
 
 	dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
 		__func__, addr, off, sz, dir);
@@ -402,6 +407,8 @@ int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
 	if (!buf)
 		return 1;
 
+	off = addr - buf->safe_dma_addr;
+
 	BUG_ON(buf->direction != dir);
 
 	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
67 changes: 33 additions & 34 deletions arch/arm/include/asm/dma-mapping.h
@@ -266,19 +266,17 @@ extern void __dma_unmap_page(struct device *, dma_addr_t, size_t,
 /*
  * Private functions
  */
-int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long,
-		size_t, enum dma_data_direction);
-int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
-		size_t, enum dma_data_direction);
+int dmabounce_sync_for_cpu(struct device *, dma_addr_t, size_t, enum dma_data_direction);
+int dmabounce_sync_for_device(struct device *, dma_addr_t, size_t, enum dma_data_direction);
 #else
 static inline int dmabounce_sync_for_cpu(struct device *d, dma_addr_t addr,
-	unsigned long offset, size_t size, enum dma_data_direction dir)
+	size_t size, enum dma_data_direction dir)
 {
 	return 1;
 }
 
 static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
-	unsigned long offset, size_t size, enum dma_data_direction dir)
+	size_t size, enum dma_data_direction dir)
 {
 	return 1;
 }
@@ -401,6 +399,33 @@ static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
 	__dma_unmap_page(dev, handle, size, dir);
 }
 
+
+static inline void dma_sync_single_for_cpu(struct device *dev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	BUG_ON(!valid_dma_direction(dir));
+
+	debug_dma_sync_single_for_cpu(dev, handle, size, dir);
+
+	if (!dmabounce_sync_for_cpu(dev, handle, size, dir))
+		return;
+
+	__dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir);
+}
+
+static inline void dma_sync_single_for_device(struct device *dev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	BUG_ON(!valid_dma_direction(dir));
+
+	debug_dma_sync_single_for_device(dev, handle, size, dir);
+
+	if (!dmabounce_sync_for_device(dev, handle, size, dir))
+		return;
+
+	__dma_single_cpu_to_dev(dma_to_virt(dev, handle), size, dir);
+}
+
 /**
  * dma_sync_single_range_for_cpu
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
@@ -423,40 +448,14 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
 		dma_addr_t handle, unsigned long offset, size_t size,
 		enum dma_data_direction dir)
 {
-	BUG_ON(!valid_dma_direction(dir));
-
-	debug_dma_sync_single_for_cpu(dev, handle + offset, size, dir);
-
-	if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
-		return;
-
-	__dma_single_dev_to_cpu(dma_to_virt(dev, handle) + offset, size, dir);
+	dma_sync_single_for_cpu(dev, handle + offset, size, dir);
 }
 
 static inline void dma_sync_single_range_for_device(struct device *dev,
 		dma_addr_t handle, unsigned long offset, size_t size,
 		enum dma_data_direction dir)
 {
-	BUG_ON(!valid_dma_direction(dir));
-
-	debug_dma_sync_single_for_device(dev, handle + offset, size, dir);
-
-	if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
-		return;
-
-	__dma_single_cpu_to_dev(dma_to_virt(dev, handle) + offset, size, dir);
-}
-
-static inline void dma_sync_single_for_cpu(struct device *dev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
-}
-
-static inline void dma_sync_single_for_device(struct device *dev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	dma_sync_single_range_for_device(dev, handle, 0, size, dir);
+	dma_sync_single_for_device(dev, handle + offset, size, dir);
 }
 
 /*
4 changes: 2 additions & 2 deletions arch/arm/mm/dma-mapping.c
@@ -660,7 +660,7 @@ void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 	int i;
 
 	for_each_sg(sg, s, nents, i) {
-		if (!dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0,
+		if (!dmabounce_sync_for_cpu(dev, sg_dma_address(s),
 					    sg_dma_len(s), dir))
 			continue;
 
@@ -686,7 +686,7 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 	int i;
 
 	for_each_sg(sg, s, nents, i) {
-		if (!dmabounce_sync_for_device(dev, sg_dma_address(s), 0,
+		if (!dmabounce_sync_for_device(dev, sg_dma_address(s),
 					    sg_dma_len(s), dir))
 			continue;
 
