Skip to content

Commit

Permalink
mm/folio: replace set_compound_order with folio_set_order
Browse files Browse the repository at this point in the history
The patch ("mm/folio: Avoid special handling for order value 0 in
folio_set_order") [1] removed the need for special handling of order = 0
in folio_set_order.  Now, folio_set_order and set_compound_order become
similar functions.  This patch removes set_compound_order and uses
folio_set_order instead.

[1] https://lore.kernel.org/all/[email protected]/

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Tarun Sahu <[email protected]>
Reviewed-by: Sidhartha Kumar <[email protected]>
Reviewed-by: Muchun Song <[email protected]>
Cc: Aneesh Kumar K.V <[email protected]>
Cc: Gerald Schaefer <[email protected]>
Cc: Matthew Wilcox <[email protected]>
Cc: Mike Kravetz <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
  • Loading branch information
jj-tarun authored and akpm00 committed Jun 19, 2023
1 parent 0bb4884 commit 1e3be48
Show file tree
Hide file tree
Showing 2 changed files with 16 additions and 26 deletions.
10 changes: 0 additions & 10 deletions include/linux/mm.h
Original file line number Diff line number Diff line change
Expand Up @@ -1232,16 +1232,6 @@ static inline void folio_set_compound_dtor(struct folio *folio,

void destroy_large_folio(struct folio *folio);

/*
 * Removed by this patch: set the order of a compound page by writing the
 * folio order fields directly.  Unlike folio_set_order below, this variant
 * performs no sanity checking (no warning on order == 0 or on a non-large
 * folio) — which is why it is being replaced by folio_set_order.
 */
static inline void set_compound_order(struct page *page, unsigned int order)
{
/* Compound-page metadata lives in the struct folio overlay of the head page. */
struct folio *folio = (struct folio *)page;

folio->_folio_order = order;
#ifdef CONFIG_64BIT
/* On 64-bit, the page count (1 << order) is cached to avoid recomputation. */
folio->_folio_nr_pages = 1U << order;
#endif
}

/* Returns the number of bytes in this potentially compound page. */
static inline unsigned long page_size(struct page *page)
{
Expand Down
32 changes: 16 additions & 16 deletions mm/internal.h
Original file line number Diff line number Diff line change
Expand Up @@ -387,12 +387,27 @@ extern void memblock_free_pages(struct page *page, unsigned long pfn,
unsigned int order);
extern void __free_pages_core(struct page *page, unsigned int order);

/*
* This will have no effect, other than possibly generating a warning, if the
* caller passes in a non-large folio.
*/
static inline void folio_set_order(struct folio *folio, unsigned int order)
{
/*
 * Reject order 0 and non-large folios: the _folio_order/_folio_nr_pages
 * fields only exist on large (compound) folios, and order 0 would make
 * this a plain page.  WARN_ON_ONCE flags the caller bug without crashing.
 */
if (WARN_ON_ONCE(!order || !folio_test_large(folio)))
return;

folio->_folio_order = order;
#ifdef CONFIG_64BIT
/* On 64-bit, cache the folio's page count (1 << order) alongside the order. */
folio->_folio_nr_pages = 1U << order;
#endif
}

static inline void prep_compound_head(struct page *page, unsigned int order)
{
struct folio *folio = (struct folio *)page;

folio_set_compound_dtor(folio, COMPOUND_PAGE_DTOR);
set_compound_order(page, order);
folio_set_order(folio, order);
atomic_set(&folio->_entire_mapcount, -1);
atomic_set(&folio->_nr_pages_mapped, 0);
atomic_set(&folio->_pincount, 0);
Expand Down Expand Up @@ -432,21 +447,6 @@ void memmap_init_range(unsigned long, int, unsigned long, unsigned long,
int split_free_page(struct page *free_page,
unsigned int order, unsigned long split_pfn_offset);

/*
* This will have no effect, other than possibly generating a warning, if the
* caller passes in a non-large folio.
*/
/*
 * Old location of folio_set_order, removed by this patch: the definition is
 * moved earlier in mm/internal.h (above prep_compound_head) so that
 * prep_compound_head can call it in place of set_compound_order.
 * Body is identical to the relocated copy.
 */
static inline void folio_set_order(struct folio *folio, unsigned int order)
{
/* Warn and bail on order 0 or a non-large folio; see function comment above. */
if (WARN_ON_ONCE(!order || !folio_test_large(folio)))
return;

folio->_folio_order = order;
#ifdef CONFIG_64BIT
/* On 64-bit, cache the folio's page count (1 << order) alongside the order. */
folio->_folio_nr_pages = 1U << order;
#endif
}

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

/*
Expand Down

0 comments on commit 1e3be48

Please sign in to comment.