ia64: remove custom __early_pfn_to_nid()
The ia64 implementation of __early_pfn_to_nid() essentially relies on the
same data as the generic implementation.

The correspondence between memory ranges and nodes is set in memblock
during early memory initialization, in the register_active_ranges() function.

The initialization of sparsemem that requires early_pfn_to_nid() happens
later, so it can use the memblock information just like the other
architectures do.
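
For reference, a sketch of the generic lookup that ia64 now falls back to,
reconstructed from the mm/page_alloc.c hunks below together with the
v5.10-era source (treat it as a sketch, not the authoritative code; the
cache-miss path resolves the node via memblock_search_pfn_nid()):

/*
 * Sketch of the generic __early_pfn_to_nid() (v5.10 era): on a cache
 * miss, ask memblock which node owns the range containing @pfn and
 * remember that range for subsequent lookups.
 */
static int __meminit __early_pfn_to_nid(unsigned long pfn,
					struct mminit_pfnnid_cache *state)
{
	unsigned long start_pfn, end_pfn;
	int nid;

	/* Fast path: pfn falls inside the most recently resolved range. */
	if (state->last_start <= pfn && pfn < state->last_end)
		return state->last_nid;

	/* Slow path: binary-search the memblock regions for the owner. */
	nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
	if (nid != NUMA_NO_NODE) {
		state->last_start = start_pfn;
		state->last_end = end_pfn;
		state->last_nid = nid;
	}

	return nid;
}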

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Mike Rapoport <[email protected]>
Cc: Alexey Dobriyan <[email protected]>
Cc: Catalin Marinas <[email protected]>
Cc: Geert Uytterhoeven <[email protected]>
Cc: Greg Ungerer <[email protected]>
Cc: John Paul Adrian Glaubitz <[email protected]>
Cc: Jonathan Corbet <[email protected]>
Cc: Matt Turner <[email protected]>
Cc: Meelis Roos <[email protected]>
Cc: Michael Schmitz <[email protected]>
Cc: Russell King <[email protected]>
Cc: Tony Luck <[email protected]>
Cc: Vineet Gupta <[email protected]>
Cc: Will Deacon <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
rppt authored and torvalds committed on Dec 15, 2020 · 1 parent 36d4029 · commit 03e92a5
Showing 5 changed files with 12 additions and 51 deletions.
3 changes: 0 additions & 3 deletions arch/ia64/Kconfig
@@ -342,9 +342,6 @@ config HOLES_IN_ZONE
 	bool
 	default y if VIRTUAL_MEM_MAP
 
-config HAVE_ARCH_EARLY_PFN_TO_NID
-	def_bool NUMA && SPARSEMEM
-
 config HAVE_ARCH_NODEDATA_EXTENSION
 	def_bool y
 	depends on NUMA
30 changes: 0 additions & 30 deletions arch/ia64/mm/numa.c
@@ -58,36 +58,6 @@ paddr_to_nid(unsigned long paddr)
 EXPORT_SYMBOL(paddr_to_nid);
 
 #if defined(CONFIG_SPARSEMEM) && defined(CONFIG_NUMA)
-/*
- * Because of holes evaluate on section limits.
- * If the section of memory exists, then return the node where the section
- * resides. Otherwise return node 0 as the default. This is used by
- * SPARSEMEM to allocate the SPARSEMEM sectionmap on the NUMA node where
- * the section resides.
- */
-int __meminit __early_pfn_to_nid(unsigned long pfn,
-					struct mminit_pfnnid_cache *state)
-{
-	int i, section = pfn >> PFN_SECTION_SHIFT, ssec, esec;
-
-	if (section >= state->last_start && section < state->last_end)
-		return state->last_nid;
-
-	for (i = 0; i < num_node_memblks; i++) {
-		ssec = node_memblk[i].start_paddr >> PA_SECTION_SHIFT;
-		esec = (node_memblk[i].start_paddr + node_memblk[i].size +
-			((1L << PA_SECTION_SHIFT) - 1)) >> PA_SECTION_SHIFT;
-		if (section >= ssec && section < esec) {
-			state->last_start = ssec;
-			state->last_end = esec;
-			state->last_nid = node_memblk[i].nid;
-			return node_memblk[i].nid;
-		}
-	}
-
-	return -1;
-}
-
 void numa_clear_node(int cpu)
 {
 	unmap_cpu_from_node(cpu, NUMA_NO_NODE);
3 changes: 0 additions & 3 deletions include/linux/mm.h
@@ -2434,9 +2434,6 @@ static inline int early_pfn_to_nid(unsigned long pfn)
 #else
 /* please see mm/page_alloc.c */
 extern int __meminit early_pfn_to_nid(unsigned long pfn);
-/* there is a per-arch backend function. */
-extern int __meminit __early_pfn_to_nid(unsigned long pfn,
-					struct mminit_pfnnid_cache *state);
 #endif
 
 extern void set_dma_reserve(unsigned long new_dma_reserve);
11 changes: 0 additions & 11 deletions include/linux/mmzone.h
@@ -1428,17 +1428,6 @@ void sparse_init(void);
 #define subsection_map_init(_pfn, _nr_pages) do {} while (0)
 #endif /* CONFIG_SPARSEMEM */
 
-/*
- * During memory init memblocks map pfns to nids. The search is expensive and
- * this caches recent lookups. The implementation of __early_pfn_to_nid
- * may treat start/end as pfns or sections.
- */
-struct mminit_pfnnid_cache {
-	unsigned long last_start;
-	unsigned long last_end;
-	int last_nid;
-};
-
 /*
  * If it is possible to have holes within a MAX_ORDER_NR_PAGES, then we
  * need to check pfn validity within that MAX_ORDER_NR_PAGES block.
16 changes: 12 additions & 4 deletions mm/page_alloc.c
@@ -1559,14 +1559,23 @@ void __free_pages_core(struct page *page, unsigned int order)

 #ifdef CONFIG_NEED_MULTIPLE_NODES
 
-static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;
+/*
+ * During memory init memblocks map pfns to nids. The search is expensive and
+ * this caches recent lookups. The implementation of __early_pfn_to_nid
+ * treats start/end as pfns.
+ */
+struct mminit_pfnnid_cache {
+	unsigned long last_start;
+	unsigned long last_end;
+	int last_nid;
+};
 
-#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
+static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;
 
 /*
  * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
  */
-int __meminit __early_pfn_to_nid(unsigned long pfn,
+static int __meminit __early_pfn_to_nid(unsigned long pfn,
 					struct mminit_pfnnid_cache *state)
 {
 	unsigned long start_pfn, end_pfn;
@@ -1584,7 +1593,6 @@ int __meminit __early_pfn_to_nid(unsigned long pfn,

 	return nid;
 }
-#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
 
 int __meminit early_pfn_to_nid(unsigned long pfn)
 {
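
For context, the early_pfn_to_nid() wrapper whose opening lines appear at
the end of this hunk is not changed by the commit. Roughly (a v5.10-era
sketch), it serializes all early lookups through the single, now file-local
cache:

int __meminit early_pfn_to_nid(unsigned long pfn)
{
	static DEFINE_SPINLOCK(early_pfn_lock);
	int nid;

	/* The shared cache is not thread-safe, so guard it with a lock. */
	spin_lock(&early_pfn_lock);
	nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
	if (nid < 0)
		nid = first_online_node;	/* fall back on a miss */
	spin_unlock(&early_pfn_lock);

	return nid;
}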
