Skip to content

Commit

Permalink
mm: split a folio in minimum folio order chunks
Browse files Browse the repository at this point in the history
split_folio() and split_folio_to_list() assume order 0. To support
minorder for non-anonymous folios, we must expand these to check the
folio mapping order and use that.

Set new_order to be at least minimum folio order if it is set in
split_huge_page_to_list() so that we can maintain minimum folio order
requirement in the page cache.

Update the debugfs write files used for testing to ensure the order
is respected as well. We simply enforce the min order when a file
mapping is used.

Signed-off-by: Luis Chamberlain <[email protected]>
Signed-off-by: Pankaj Raghav <[email protected]>
Link: https://lore.kernel.org/r/[email protected] # folded fix
Link: https://lore.kernel.org/r/[email protected]
Tested-by: David Howells <[email protected]>
Reviewed-by: Hannes Reinecke <[email protected]>
Reviewed-by: Zi Yan <[email protected]>
Signed-off-by: Christian Brauner <[email protected]>
  • Loading branch information
mcgrof authored and brauner committed Sep 2, 2024
1 parent 26cfdb3 commit e220917
Show file tree
Hide file tree
Showing 2 changed files with 85 additions and 8 deletions.
28 changes: 24 additions & 4 deletions include/linux/huge_mm.h
Original file line number Diff line number Diff line change
Expand Up @@ -96,6 +96,8 @@ extern struct kobj_attribute thpsize_shmem_enabled_attr;
#define thp_vma_allowable_order(vma, vm_flags, tva_flags, order) \
(!!thp_vma_allowable_orders(vma, vm_flags, tva_flags, BIT(order)))

#define split_folio(f) split_folio_to_list(f, NULL)

#ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PUD_SHIFT PUD_SHIFT
Expand Down Expand Up @@ -317,9 +319,24 @@ unsigned long thp_get_unmapped_area_vmflags(struct file *filp, unsigned long add
bool can_split_folio(struct folio *folio, int *pextra_pins);
int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
unsigned int new_order);
int min_order_for_split(struct folio *folio);
int split_folio_to_list(struct folio *folio, struct list_head *list);
/*
 * split_huge_page - split a huge page down to base pages, honouring the
 * minimum folio order of a file-backed mapping if one is set.
 *
 * Returns 0 on success or a negative errno (e.g. -EBUSY when the folio
 * is truncated, or the error from the underlying split).  The page is
 * expected to be locked by the caller and remains locked on return.
 */
static inline int split_huge_page(struct page *page)
{
	struct folio *folio = page_folio(page);
	/* Lowest order the mapping allows us to split to (0 for anon). */
	int ret = min_order_for_split(folio);

	if (ret < 0)
		return ret;

	/*
	 * split_huge_page() locks the page before splitting and
	 * expects the same page that has been split to be locked when
	 * returned. split_folio(page_folio(page)) cannot be used here
	 * because it converts the page to folio and passes the head
	 * page to be split.
	 */
	return split_huge_page_to_list_to_order(page, NULL, ret);
}
void deferred_split_folio(struct folio *folio);

Expand Down Expand Up @@ -484,6 +501,12 @@ static inline int split_huge_page(struct page *page)
{
return 0;
}

/*
 * No-op stub for !CONFIG_TRANSPARENT_HUGEPAGE builds: without THP there
 * are no large folios to split, so report success unconditionally.
 */
static inline int split_folio_to_list(struct folio *folio, struct list_head *list)
{
return 0;
}

static inline void deferred_split_folio(struct folio *folio) {}
#define split_huge_pmd(__vma, __pmd, __address) \
do { } while (0)
Expand Down Expand Up @@ -598,7 +621,4 @@ static inline int split_folio_to_order(struct folio *folio, int new_order)
return split_folio_to_list_to_order(folio, NULL, new_order);
}

#define split_folio_to_list(f, l) split_folio_to_list_to_order(f, l, 0)
#define split_folio(f) split_folio_to_order(f, 0)

#endif /* _LINUX_HUGE_MM_H */
65 changes: 61 additions & 4 deletions mm/huge_memory.c
Original file line number Diff line number Diff line change
Expand Up @@ -3082,6 +3082,9 @@ bool can_split_folio(struct folio *folio, int *pextra_pins)
* released, or if some unexpected race happened (e.g., anon VMA disappeared,
* truncation).
*
* Callers should ensure that the order respects the address space mapping
* min-order if one is set for non-anonymous folios.
*
* Returns -EINVAL when trying to split to an order that is incompatible
* with the folio. Splitting to order 0 is compatible with all folios.
*/
Expand Down Expand Up @@ -3163,6 +3166,7 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
mapping = NULL;
anon_vma_lock_write(anon_vma);
} else {
unsigned int min_order;
gfp_t gfp;

mapping = folio->mapping;
Expand All @@ -3173,6 +3177,14 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
goto out;
}

min_order = mapping_min_folio_order(folio->mapping);
if (new_order < min_order) {
VM_WARN_ONCE(1, "Cannot split mapped folio below min-order: %u",
min_order);
ret = -EINVAL;
goto out;
}

gfp = current_gfp_context(mapping_gfp_mask(mapping) &
GFP_RECLAIM_MASK);

Expand Down Expand Up @@ -3285,6 +3297,30 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
return ret;
}

/*
 * min_order_for_split - lowest order a folio may be split to.
 *
 * Anonymous folios have no mapping-imposed constraint and may always be
 * split to order 0.  File-backed folios must respect the address space's
 * minimum folio order.  Returns -EBUSY if the folio has been truncated
 * (mapping gone), counting a failed PMD split for vmstat when applicable.
 */
int min_order_for_split(struct folio *folio)
{
/* Anonymous memory: no minimum order requirement. */
if (folio_test_anon(folio))
return 0;

if (!folio->mapping) {
/* Truncated under us; account the failed THP split attempt. */
if (folio_test_pmd_mappable(folio))
count_vm_event(THP_SPLIT_PAGE_FAILED);
return -EBUSY;
}

return mapping_min_folio_order(folio->mapping);
}

/*
 * split_folio_to_list - split a folio as far as its mapping permits.
 * @folio: folio to split (caller holds the folio lock).
 * @list: list to put the split tail pages on, or NULL.
 *
 * Resolves the mapping's minimum folio order first, then delegates to
 * split_huge_page_to_list_to_order() with that order so page-cache
 * min-order requirements are never violated.  Returns 0 on success or a
 * negative errno from either step.
 */
int split_folio_to_list(struct folio *folio, struct list_head *list)
{
int ret = min_order_for_split(folio);

if (ret < 0)
return ret;

/* ret now holds the minimum allowed target order, not an error. */
return split_huge_page_to_list_to_order(&folio->page, list, ret);
}

void __folio_undo_large_rmappable(struct folio *folio)
{
struct deferred_split *ds_queue;
Expand Down Expand Up @@ -3515,6 +3551,8 @@ static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
struct vm_area_struct *vma = vma_lookup(mm, addr);
struct page *page;
struct folio *folio;
struct address_space *mapping;
unsigned int target_order = new_order;

if (!vma)
break;
Expand All @@ -3535,7 +3573,13 @@ static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
if (!is_transparent_hugepage(folio))
goto next;

if (new_order >= folio_order(folio))
if (!folio_test_anon(folio)) {
mapping = folio->mapping;
target_order = max(new_order,
mapping_min_folio_order(mapping));
}

if (target_order >= folio_order(folio))
goto next;

total++;
Expand All @@ -3551,9 +3595,14 @@ static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
if (!folio_trylock(folio))
goto next;

if (!split_folio_to_order(folio, new_order))
if (!folio_test_anon(folio) && folio->mapping != mapping)
goto unlock;

if (!split_folio_to_order(folio, target_order))
split++;

unlock:

folio_unlock(folio);
next:
folio_put(folio);
Expand All @@ -3578,6 +3627,8 @@ static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start,
pgoff_t index;
int nr_pages = 1;
unsigned long total = 0, split = 0;
unsigned int min_order;
unsigned int target_order;

file = getname_kernel(file_path);
if (IS_ERR(file))
Expand All @@ -3591,6 +3642,8 @@ static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start,
file_path, off_start, off_end);

mapping = candidate->f_mapping;
min_order = mapping_min_folio_order(mapping);
target_order = max(new_order, min_order);

for (index = off_start; index < off_end; index += nr_pages) {
struct folio *folio = filemap_get_folio(mapping, index);
Expand All @@ -3605,15 +3658,19 @@ static int split_huge_pages_in_file(const char *file_path, pgoff_t off_start,
total++;
nr_pages = folio_nr_pages(folio);

if (new_order >= folio_order(folio))
if (target_order >= folio_order(folio))
goto next;

if (!folio_trylock(folio))
goto next;

if (!split_folio_to_order(folio, new_order))
if (folio->mapping != mapping)
goto unlock;

if (!split_folio_to_order(folio, target_order))
split++;

unlock:
folio_unlock(folio);
next:
folio_put(folio);
Expand Down

0 comments on commit e220917

Please sign in to comment.