arm, xtensa: simplify initialization of high memory pages
free_highpages() in both arm and xtensa essentially open-codes the
for_each_free_mem_range() loop to detect high memory pages that were not
reserved and that should be initialized and passed to the buddy allocator.

Replace the open-coded implementation with the for_each_free_mem_range()
iterator from the memblock API to simplify the code.

Signed-off-by: Mike Rapoport <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Tested-by: Max Filippov <[email protected]>	[xtensa]
Reviewed-by: Max Filippov <[email protected]>	[xtensa]
Cc: Andy Lutomirski <[email protected]>
Cc: Baoquan He <[email protected]>
Cc: Benjamin Herrenschmidt <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Catalin Marinas <[email protected]>
Cc: Christoph Hellwig <[email protected]>
Cc: Daniel Axtens <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: Emil Renner Berthing <[email protected]>
Cc: Hari Bathini <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Jonathan Cameron <[email protected]>
Cc: Marek Szyprowski <[email protected]>
Cc: Michael Ellerman <[email protected]>
Cc: Michal Simek <[email protected]>
Cc: Miguel Ojeda <[email protected]>
Cc: Palmer Dabbelt <[email protected]>
Cc: Paul Mackerras <[email protected]>
Cc: Paul Walmsley <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Russell King <[email protected]>
Cc: Stafford Horne <[email protected]>
Cc: Thomas Bogendoerfer <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Will Deacon <[email protected]>
Cc: Yoshinori Sato <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Linus Torvalds <[email protected]>
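
For reference, both free_highpages() implementations converge on the same shape after this change. Below is a consolidated sketch distilled from the hunks further down, with header includes and explanatory comments added here for illustration only; it is not code carried by the commit itself.

#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/mm.h>

static void __init free_highpages(void)
{
	unsigned long max_low = max_low_pfn;
	phys_addr_t range_start, range_end;
	u64 i;

	/*
	 * for_each_free_mem_range() only yields memory that is neither
	 * reserved nor marked MEMBLOCK_NOMAP, so the explicit nomap test
	 * and the open-coded exclusion of reserved regions both go away.
	 */
	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
				&range_start, &range_end, NULL) {
		unsigned long start = PHYS_PFN(range_start);
		unsigned long end = PHYS_PFN(range_end);

		/* Skip ranges that lie entirely below the highmem boundary. */
		if (end <= max_low)
			continue;

		/* Clip ranges that straddle the lowmem/highmem boundary. */
		if (start < max_low)
			start = max_low;

		/* Hand every remaining highmem page to the buddy allocator. */
		for (; start < end; start++)
			free_highmem_page(pfn_to_page(start));
	}
}

In the actual patch the body is additionally wrapped in #ifdef CONFIG_HIGHMEM, and the xtensa version calls reset_all_zones_managed_pages() first, as the diffs below show.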
rppt authored and torvalds committed Oct 14, 2020
1 parent e9aa36c commit cddb5dd
Showing 2 changed files with 18 additions and 85 deletions.
48 changes: 8 additions & 40 deletions arch/arm/mm/init.c
@@ -347,61 +347,29 @@ static void __init free_unused_memmap(void)
 #endif
 }
 
-#ifdef CONFIG_HIGHMEM
-static inline void free_area_high(unsigned long pfn, unsigned long end)
-{
-	for (; pfn < end; pfn++)
-		free_highmem_page(pfn_to_page(pfn));
-}
-#endif
-
 static void __init free_highpages(void)
 {
 #ifdef CONFIG_HIGHMEM
 	unsigned long max_low = max_low_pfn;
-	struct memblock_region *mem, *res;
+	phys_addr_t range_start, range_end;
+	u64 i;
 
 	/* set highmem page free */
-	for_each_memblock(memory, mem) {
-		unsigned long start = memblock_region_memory_base_pfn(mem);
-		unsigned long end = memblock_region_memory_end_pfn(mem);
+	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
+				&range_start, &range_end, NULL) {
+		unsigned long start = PHYS_PFN(range_start);
+		unsigned long end = PHYS_PFN(range_end);
 
 		/* Ignore complete lowmem entries */
 		if (end <= max_low)
 			continue;
 
-		if (memblock_is_nomap(mem))
-			continue;
-
 		/* Truncate partial highmem entries */
 		if (start < max_low)
 			start = max_low;
 
-		/* Find and exclude any reserved regions */
-		for_each_memblock(reserved, res) {
-			unsigned long res_start, res_end;
-
-			res_start = memblock_region_reserved_base_pfn(res);
-			res_end = memblock_region_reserved_end_pfn(res);
-
-			if (res_end < start)
-				continue;
-			if (res_start < start)
-				res_start = start;
-			if (res_start > end)
-				res_start = end;
-			if (res_end > end)
-				res_end = end;
-			if (res_start != start)
-				free_area_high(start, res_start);
-			start = res_end;
-			if (start == end)
-				break;
-		}
-
-		/* And now free anything which remains */
-		if (start < end)
-			free_area_high(start, end);
+		for (; start < end; start++)
+			free_highmem_page(pfn_to_page(start));
 	}
 #endif
 }
55 changes: 10 additions & 45 deletions arch/xtensa/mm/init.c
@@ -79,67 +79,32 @@ void __init zones_init(void)
 	free_area_init(max_zone_pfn);
 }
 
-#ifdef CONFIG_HIGHMEM
-static void __init free_area_high(unsigned long pfn, unsigned long end)
-{
-	for (; pfn < end; pfn++)
-		free_highmem_page(pfn_to_page(pfn));
-}
-
 static void __init free_highpages(void)
 {
+#ifdef CONFIG_HIGHMEM
 	unsigned long max_low = max_low_pfn;
-	struct memblock_region *mem, *res;
+	phys_addr_t range_start, range_end;
+	u64 i;
 
 	reset_all_zones_managed_pages();
 	/* set highmem page free */
-	for_each_memblock(memory, mem) {
-		unsigned long start = memblock_region_memory_base_pfn(mem);
-		unsigned long end = memblock_region_memory_end_pfn(mem);
+	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
+				&range_start, &range_end, NULL) {
+		unsigned long start = PHYS_PFN(range_start);
+		unsigned long end = PHYS_PFN(range_end);
 
 		/* Ignore complete lowmem entries */
 		if (end <= max_low)
 			continue;
 
-		if (memblock_is_nomap(mem))
-			continue;
-
 		/* Truncate partial highmem entries */
 		if (start < max_low)
 			start = max_low;
 
-		/* Find and exclude any reserved regions */
-		for_each_memblock(reserved, res) {
-			unsigned long res_start, res_end;
-
-			res_start = memblock_region_reserved_base_pfn(res);
-			res_end = memblock_region_reserved_end_pfn(res);
-
-			if (res_end < start)
-				continue;
-			if (res_start < start)
-				res_start = start;
-			if (res_start > end)
-				res_start = end;
-			if (res_end > end)
-				res_end = end;
-			if (res_start != start)
-				free_area_high(start, res_start);
-			start = res_end;
-			if (start == end)
-				break;
-		}
-
-		/* And now free anything which remains */
-		if (start < end)
-			free_area_high(start, end);
+		for (; start < end; start++)
+			free_highmem_page(pfn_to_page(start));
 	}
-}
-#else
-static void __init free_highpages(void)
-{
-}
 #endif
+}
 
 /*
  * Initialize memory pages.
