arm64: Increase the swiotlb buffer size to 64MB
With commit 3690951 (arm64: Use swiotlb late initialisation), the
swiotlb buffer size is limited to MAX_ORDER_NR_PAGES. However, there are
platforms with 32-bit only devices that require bounce buffering via
swiotlb. This patch changes the swiotlb initialisation to an early 64MB
memblock allocation. In order to get the swiotlb buffer correctly
allocated (via memblock_virt_alloc_low_nopanic), this patch also defines
ARCH_LOW_ADDRESS_LIMIT to the maximum physical address capable of 32-bit
DMA.

Reported-by: Kefeng Wang <[email protected]>
Tested-by: Kefeng Wang <[email protected]>
Signed-off-by: Catalin Marinas <[email protected]>
ctmarinas committed Feb 27, 2015
1 parent f6242ca commit a1e50a8
Showing 3 changed files with 14 additions and 19 deletions.
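
For context on the MAX_ORDER_NR_PAGES limit mentioned in the commit message: the late swiotlb initialisation allocates from the page allocator, which cannot hand out more contiguous memory than MAX_ORDER allows. Below is a minimal userspace sketch of that arithmetic, assuming the common arm64 defaults of 4KB pages and MAX_ORDER = 11 (both assumptions for illustration, not taken from this commit); it shows why the late-initialised buffer ended up well short of the intended 64MB.

/*
 * Illustrative userspace sketch (not kernel code): with the assumed
 * defaults, MAX_ORDER_NR_PAGES << PAGE_SHIFT is only 4MB, so the old
 * min(SZ_64M, ...) clamp left a 4MB bounce buffer instead of 64MB.
 */
#include <stdio.h>

#define PAGE_SHIFT		12	/* assumed: 4KB pages */
#define MAX_ORDER		11	/* assumed: default CONFIG_FORCE_MAX_ZONEORDER */
#define MAX_ORDER_NR_PAGES	(1UL << (MAX_ORDER - 1))
#define SZ_64M			(64UL * 1024 * 1024)

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned long late_cap = MAX_ORDER_NR_PAGES << PAGE_SHIFT;
	unsigned long old_size = min_ul(SZ_64M, late_cap);

	printf("late-init cap    : %lu MB\n", late_cap >> 20);	/* 4 MB */
	printf("old swiotlb size : %lu MB\n", old_size >> 20);	/* 4 MB */
	printf("new swiotlb size : %lu MB\n", SZ_64M >> 20);	/* 64 MB */
	return 0;
}
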
arch/arm64/include/asm/processor.h (3 changes: 2 additions & 1 deletion)
@@ -45,7 +45,8 @@
 #define STACK_TOP		STACK_TOP_MAX
 #endif /* CONFIG_COMPAT */
 
-#define ARCH_LOW_ADDRESS_LIMIT	PHYS_MASK
+extern phys_addr_t arm64_dma_phys_limit;
+#define ARCH_LOW_ADDRESS_LIMIT	(arm64_dma_phys_limit - 1)
 #endif /* __KERNEL__ */
 
 struct debug_info {
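
The new ARCH_LOW_ADDRESS_LIMIT above subtracts 1 because arm64_dma_phys_limit is an exclusive bound (the first address beyond the 32-bit DMA range) while the limit consumed by the low-memory memblock allocators is an inclusive maximum address. A minimal sketch of that arithmetic, assuming a platform whose 32-bit DMA range is simply the first 4GB (an assumption for illustration only):

/*
 * Illustrative userspace sketch (not kernel code): the "- 1" converts an
 * exclusive upper bound into an inclusive maximum address.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t arm64_dma_phys_limit = 1ULL << 32;			/* exclusive: first byte beyond 32-bit DMA (assumed 4GB) */
	uint64_t arch_low_address_limit = arm64_dma_phys_limit - 1;	/* inclusive: 0xffffffff */

	printf("arm64_dma_phys_limit   = %#" PRIx64 "\n", arm64_dma_phys_limit);
	printf("ARCH_LOW_ADDRESS_LIMIT = %#" PRIx64 "\n", arch_low_address_limit);
	return 0;
}
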
arch/arm64/mm/dma-mapping.c (16 changes: 3 additions & 13 deletions)
@@ -348,8 +348,6 @@ static struct dma_map_ops swiotlb_dma_ops = {
 	.mapping_error = swiotlb_dma_mapping_error,
 };
 
-extern int swiotlb_late_init_with_default_size(size_t default_size);
-
 static int __init atomic_pool_init(void)
 {
 	pgprot_t prot = __pgprot(PROT_NORMAL_NC);
@@ -411,21 +409,13 @@ static int __init atomic_pool_init(void)
 	return -ENOMEM;
 }
 
-static int __init swiotlb_late_init(void)
+static int __init arm64_dma_init(void)
 {
-	size_t swiotlb_size = min(SZ_64M, MAX_ORDER_NR_PAGES << PAGE_SHIFT);
+	int ret;
 
 	dma_ops = &swiotlb_dma_ops;
 
-	return swiotlb_late_init_with_default_size(swiotlb_size);
-}
-
-static int __init arm64_dma_init(void)
-{
-	int ret = 0;
-
-	ret |= swiotlb_late_init();
-	ret |= atomic_pool_init();
+	ret = atomic_pool_init();
 
 	return ret;
 }
arch/arm64/mm/init.c (14 changes: 9 additions & 5 deletions)
@@ -33,6 +33,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/dma-contiguous.h>
 #include <linux/efi.h>
+#include <linux/swiotlb.h>
 
 #include <asm/fixmap.h>
 #include <asm/memory.h>
@@ -45,6 +46,7 @@
 #include "mm.h"
 
 phys_addr_t memstart_addr __read_mostly = 0;
+phys_addr_t arm64_dma_phys_limit __read_mostly;
 
 #ifdef CONFIG_BLK_DEV_INITRD
 static int __init early_initrd(char *p)
@@ -85,7 +87,7 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
 
 	/* 4GB maximum for 32-bit only capable devices */
 	if (IS_ENABLED(CONFIG_ZONE_DMA)) {
-		max_dma = PFN_DOWN(max_zone_dma_phys());
+		max_dma = PFN_DOWN(arm64_dma_phys_limit);
 		zone_size[ZONE_DMA] = max_dma - min;
 	}
 	zone_size[ZONE_NORMAL] = max - max_dma;
@@ -156,8 +158,6 @@ early_param("mem", early_mem);
 
 void __init arm64_memblock_init(void)
 {
-	phys_addr_t dma_phys_limit = 0;
-
 	memblock_enforce_memory_limit(memory_limit);
 
 	/*
@@ -174,8 +174,10 @@ void __init arm64_memblock_init(void)
 
 	/* 4GB maximum for 32-bit only capable devices */
 	if (IS_ENABLED(CONFIG_ZONE_DMA))
-		dma_phys_limit = max_zone_dma_phys();
-	dma_contiguous_reserve(dma_phys_limit);
+		arm64_dma_phys_limit = max_zone_dma_phys();
+	else
+		arm64_dma_phys_limit = PHYS_MASK + 1;
+	dma_contiguous_reserve(arm64_dma_phys_limit);
 
 	memblock_allow_resize();
 	memblock_dump_all();
@@ -276,6 +278,8 @@ static void __init free_unused_memmap(void)
  */
 void __init mem_init(void)
 {
+	swiotlb_init(1);
+
 	set_max_mapnr(pfn_to_page(max_pfn) - mem_map);
 
 #ifndef CONFIG_SPARSEMEM_VMEMMAP
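
With the min(SZ_64M, ...) clamp removed from dma-mapping.c, the 64MB figure now comes from swiotlb's own default when swiotlb_init(1) runs from mem_init(); the memblock_virt_alloc_low_nopanic allocation mentioned in the commit message happens inside that call, bounded by the new ARCH_LOW_ADDRESS_LIMIT. A trivial sketch, assuming the generic default is the usual IO_TLB_DEFAULT_SIZE of 64MB (the macro name and value are assumptions about kernels of this era, not shown in this diff):

/*
 * Illustrative userspace sketch (not kernel code): where the 64MB comes
 * from once the arch-specific clamp is gone.
 */
#include <stdio.h>

#define IO_TLB_DEFAULT_SIZE	(64UL << 20)	/* assumed generic swiotlb default */

int main(void)
{
	printf("swiotlb_init() default buffer: %lu MB\n", IO_TLB_DEFAULT_SIZE >> 20);
	return 0;
}
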
