[PATCH] mm: split highorder pages
Add an explicit mm call to split higher-order pages into individual pages.
This should help avoid bugs and make the code's intention more explicit.

Signed-off-by: Nick Piggin <[email protected]>
Cc: Russell King <[email protected]>
Cc: David Howells <[email protected]>
Cc: Ralf Baechle <[email protected]>
Cc: Benjamin Herrenschmidt <[email protected]>
Cc: Paul Mundt <[email protected]>
Cc: "David S. Miller" <[email protected]>
Cc: Chris Zankel <[email protected]>
Signed-off-by: Yoichi Yuasa <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Nick Piggin authored and Linus Torvalds committed Mar 22, 2006
1 parent 8e7a9aa commit 8dfcc9b
Showing 9 changed files with 41 additions and 21 deletions.
4 changes: 2 additions & 2 deletions arch/arm/mm/consistent.c
@@ -223,6 +223,8 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
 		pte = consistent_pte[idx] + off;
 		c->vm_pages = page;
 
+		split_page(page, order);
+
 		/*
 		 * Set the "dma handle"
 		 */
@@ -231,7 +233,6 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
 		do {
 			BUG_ON(!pte_none(*pte));
 
-			set_page_count(page, 1);
 			/*
 			 * x86 does not mark the pages reserved...
 			 */
@@ -250,7 +251,6 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
 	 * Free the otherwise unused pages.
 	 */
 	while (page < end) {
-		set_page_count(page, 1);
 		__free_page(page);
 		page++;
 	}
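
The arm hunks above (and the ppc and sh ones below) share one shape: allocate an order-N block, split_page() it up front, map what the caller asked for, and return the now-independent surplus pages with __free_page(). A minimal sketch of that shape, assuming 2.6.16-era APIs; my_dma_alloc and the trimming arithmetic are illustrative, not from the patch, and the real allocators additionally remap the kept pages uncached and mark them reserved:

	/* Sketch only: allocate, split, trim. Hypothetical helper. */
	static void *my_dma_alloc(size_t size, gfp_t gfp)
	{
		unsigned int order = get_order(size);	/* smallest order covering size */
		struct page *page = alloc_pages(gfp, order);
		struct page *p, *end;

		if (!page)
			return NULL;

		split_page(page, order);	/* every sub-page now has count 1 */

		/* Free the otherwise unused tail pages, one by one. */
		end = page + (1 << order);
		for (p = page + (PAGE_ALIGN(size) >> PAGE_SHIFT); p < end; p++)
			__free_page(p);

		return page_address(page);
	}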
4 changes: 1 addition & 3 deletions arch/frv/mm/dma-alloc.c
@@ -115,9 +115,7 @@ void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *dma_handle)
 	 */
 	if (order > 0) {
 		struct page *rpage = virt_to_page(page);
-
-		for (i = 1; i < (1 << order); i++)
-			set_page_count(rpage + i, 1);
+		split_page(rpage, order);
 	}
 
 	err = 0;
5 changes: 3 additions & 2 deletions arch/mips/mm/init.c
@@ -54,7 +54,8 @@ unsigned long empty_zero_page, zero_page_mask;
  */
 unsigned long setup_zero_pages(void)
 {
-	unsigned long order, size;
+	unsigned int order;
+	unsigned long size;
 	struct page *page;
 
 	if (cpu_has_vce)
@@ -67,9 +68,9 @@ unsigned long setup_zero_pages(void)
 		panic("Oh boy, that early out of memory?");
 
 	page = virt_to_page(empty_zero_page);
+	split_page(page, order);
 	while (page < virt_to_page(empty_zero_page + (PAGE_SIZE << order))) {
 		SetPageReserved(page);
-		set_page_count(page, 1);
 		page++;
 	}
 
4 changes: 2 additions & 2 deletions arch/ppc/kernel/dma-mapping.c
@@ -223,6 +223,8 @@ __dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp)
 		pte_t *pte = consistent_pte + CONSISTENT_OFFSET(vaddr);
 		struct page *end = page + (1 << order);
 
+		split_page(page, order);
+
 		/*
 		 * Set the "dma handle"
 		 */
@@ -231,7 +233,6 @@ __dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp)
 		do {
 			BUG_ON(!pte_none(*pte));
 
-			set_page_count(page, 1);
 			SetPageReserved(page);
 			set_pte_at(&init_mm, vaddr,
 				   pte, mk_pte(page, pgprot_noncached(PAGE_KERNEL)));
@@ -244,7 +245,6 @@ __dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp)
 	 * Free the otherwise unused pages.
 	 */
 	while (page < end) {
-		set_page_count(page, 1);
 		__free_page(page);
 		page++;
 	}
3 changes: 1 addition & 2 deletions arch/sh/mm/consistent.c
@@ -23,6 +23,7 @@ void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *handle)
 	page = alloc_pages(gfp, order);
 	if (!page)
 		return NULL;
+	split_page(page, order);
 
 	ret = page_address(page);
 	*handle = virt_to_phys(ret);
@@ -37,8 +38,6 @@ void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *handle)
 	end = page + (1 << order);
 
 	while (++page < end) {
-		set_page_count(page, 1);
-
 		/* Free any unused pages */
 		if (page >= free) {
 			__free_page(page);
10 changes: 3 additions & 7 deletions arch/xtensa/mm/pgtable.c
@@ -21,13 +21,9 @@ pte_t* pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 	p = (pte_t*) __get_free_pages(GFP_KERNEL|__GFP_REPEAT, COLOR_ORDER);
 
 	if (likely(p)) {
-		struct page *page;
+		split_page(virt_to_page(p), COLOR_ORDER);
 
 		for (i = 0; i < COLOR_SIZE; i++) {
-			page = virt_to_page(p);
-
-			set_page_count(page, 1);
-
 			if (ADDR_COLOR(p) == color)
 				pte = p;
 			else
@@ -55,9 +51,9 @@ struct page* pte_alloc_one(struct mm_struct *mm, unsigned long address)
 	p = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER);
 
 	if (likely(p)) {
-		for (i = 0; i < PAGE_ORDER; i++) {
-			set_page_count(p, 1);
+		split_page(p, COLOR_ORDER);
 
+		for (i = 0; i < PAGE_ORDER; i++) {
 			if (PADDR_COLOR(page_address(p)) == color)
 				page = p;
 			else
6 changes: 6 additions & 0 deletions include/linux/mm.h
@@ -328,6 +328,12 @@ static inline void get_page(struct page *page)
 
 void put_page(struct page *page);
 
+#ifdef CONFIG_MMU
+void split_page(struct page *page, unsigned int order);
+#else
+static inline void split_page(struct page *page, unsigned int order) {}
+#endif
+
 /*
  * Multiple processes may "see" the same page. E.g. for untouched
  * mappings of /dev/null, all processes see the same page full of
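
Note the #else branch: on !CONFIG_MMU builds split_page() compiles to an empty inline, so shared code can call it unconditionally. A hedged illustration of that design choice (alloc_split is a hypothetical helper, not part of the patch):

	/* Hypothetical helper that builds on both MMU and nommu kernels;
	 * on !CONFIG_MMU the split_page() call compiles away entirely. */
	static struct page *alloc_split(gfp_t gfp, unsigned int order)
	{
		struct page *page = alloc_pages(gfp, order);

		if (page)
			split_page(page, order);
		return page;
	}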
4 changes: 1 addition & 3 deletions mm/memory.c
@@ -1221,9 +1221,7 @@ static int insert_page(struct mm_struct *mm, unsigned long addr, struct page *pa
  * The page has to be a nice clean _individual_ kernel allocation.
  * If you allocate a compound page, you need to have marked it as
  * such (__GFP_COMP), or manually just split the page up yourself
- * (which is mainly an issue of doing "set_page_count(page, 1)" for
- * each sub-page, and then freeing them one by one when you free
- * them rather than freeing it as a compound page).
+ * (see split_page()).
  *
  * NOTE! Traditionally this was done with "remap_pfn_range()" which
  * took an arbitrary page protection parameter. This doesn't allow
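
This comment states the rule for insert_page()'s caller, vm_insert_page(): a higher-order, non-compound allocation must be split before its sub-pages go into a VMA. A sketch of a driver mmap handler following the rule, assuming 2.6.16-era APIs; my_mmap, the fixed order, and the bare-bones error handling are illustrative, not from the patch:

	/* Sketch only: split before vm_insert_page(). */
	static int my_mmap(struct file *file, struct vm_area_struct *vma)
	{
		unsigned int order = 2;		/* illustrative: 4 pages */
		struct page *pages = alloc_pages(GFP_KERNEL, order);	/* no __GFP_COMP */
		unsigned long addr = vma->vm_start;
		int i, err;

		if (!pages)
			return -ENOMEM;
		split_page(pages, order);	/* individual pages, as insert_page() demands */

		for (i = 0; i < (1 << order); i++, addr += PAGE_SIZE) {
			err = vm_insert_page(vma, addr, pages + i);
			if (err)
				return err;	/* real code would free the sub-pages here */
		}
		return 0;
	}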
22 changes: 22 additions & 0 deletions mm/page_alloc.c
@@ -752,6 +752,28 @@ static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
 		clear_highpage(page + i);
 }
 
+#ifdef CONFIG_MMU
+/*
+ * split_page takes a non-compound higher-order page, and splits it into
+ * n (1<<order) sub-pages: page[0..n]
+ * Each sub-page must be freed individually.
+ *
+ * Note: this is probably too low level an operation for use in drivers.
+ * Please consult with lkml before using this in your driver.
+ */
+void split_page(struct page *page, unsigned int order)
+{
+	int i;
+
+	BUG_ON(PageCompound(page));
+	BUG_ON(!page_count(page));
+	for (i = 1; i < (1 << order); i++) {
+		BUG_ON(page_count(page + i));
+		set_page_count(page + i, 1);
+	}
+}
+#endif
+
 /*
  * Really, prep_compound_page() should be called from __rmqueue_bulk(). But
  * we cheat by calling it from here, in the order > 0 path. Saves a branch
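
The loop starts at i = 1 because page[0], the head, already carries the allocation's single reference; only the tail pages need a count. The BUG_ON()s pin down the preconditions: a non-compound head with a live count, and tails with a count of zero. An illustrative walk-through of those invariants for order = 2 (hypothetical snippet, not from the patch):

	struct page *page = alloc_pages(GFP_KERNEL, 2);	/* 4 pages, non-compound */

	if (page) {
		/* Preconditions that split_page() asserts: */
		BUG_ON(page_count(page) != 1);		/* head holds the one reference */
		BUG_ON(page_count(page + 1) != 0);	/* tails hold none */

		split_page(page, 2);

		/* Afterwards each sub-page stands alone and is freed on its own. */
		BUG_ON(page_count(page + 3) != 1);
		__free_page(page + 0);
		__free_page(page + 1);
		__free_page(page + 2);
		__free_page(page + 3);
	}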
