asm-generic/tlb: Introduce CONFIG_HAVE_MMU_GATHER_NO_GATHER=y

Add the Kconfig option HAVE_MMU_GATHER_NO_GATHER to the generic
mmu_gather code. If the option is set, the mmu_gather no longer
tracks individual pages for delayed page freeing. A platform that
enables the option needs to provide its own implementation of the
__tlb_remove_page_size() function to free pages.

No change in behavior intended.

Signed-off-by: Martin Schwidefsky <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Acked-by: Will Deacon <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: H. Peter Anvin <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Rik van Riel <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
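
For context: with HAVE_MMU_GATHER_NO_GATHER=y the generic code no longer provides __tlb_remove_page_size(), so the architecture has to supply it, typically in its asm/tlb.h. A minimal sketch of what such an implementation could look like, assuming the platform can release pages immediately rather than gathering them (hypothetical code, not part of this commit; it relies on free_page_and_swap_cache() from linux/swap.h):

	static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
						  struct page *page, int page_size)
	{
		/* No gathering: release the page (and its swap cache entry) now. */
		free_page_and_swap_cache(page);
		/* Returning false tells the caller that no flush is needed yet. */
		return false;
	}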
Martin Schwidefsky authored and Ingo Molnar committed Apr 3, 2019
1 parent 6137fed commit 952a31c
Showing 3 changed files with 70 additions and 49 deletions.
arch/Kconfig (3 additions, 0 deletions)
@@ -389,6 +389,9 @@ config HAVE_RCU_TABLE_NO_INVALIDATE
config HAVE_MMU_GATHER_PAGE_SIZE
bool

config HAVE_MMU_GATHER_NO_GATHER
bool

config ARCH_HAVE_NMI_SAFE_CMPXCHG
bool

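An architecture opts into the new behavior by selecting the symbol from its own Kconfig entry, for example (hypothetical architecture name, not taken from this series):

	config ARCH_FOO
		bool
		select HAVE_MMU_GATHER_NO_GATHER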
include/asm-generic/tlb.h (7 additions, 2 deletions)
@@ -191,6 +191,7 @@ extern void tlb_remove_table(struct mmu_gather *tlb, void *table);

#endif

#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
/*
* If we can't allocate a page to make a big batch of page pointers
* to work on, then just handle a few from the on-stack structure.
@@ -215,6 +216,10 @@ struct mmu_gather_batch {
*/
#define MAX_GATHER_BATCH_COUNT (10000UL/MAX_GATHER_BATCH)

extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
int page_size);
#endif

/*
* struct mmu_gather is an opaque type used by the mm code for passing around
* any data needed by arch specific code for tlb_remove_page.
@@ -261,13 +266,15 @@ struct mmu_gather {

unsigned int batch_count;

#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
struct mmu_gather_batch *active;
struct mmu_gather_batch local;
struct page *__pages[MMU_GATHER_BUNDLE];

#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
unsigned int page_size;
#endif
#endif
};

void arch_tlb_gather_mmu(struct mmu_gather *tlb,
@@ -276,8 +283,6 @@ void tlb_flush_mmu(struct mmu_gather *tlb);
void arch_tlb_finish_mmu(struct mmu_gather *tlb,
unsigned long start, unsigned long end, bool force);
void tlb_flush_mmu_free(struct mmu_gather *tlb);
extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
int page_size);

static inline void __tlb_adjust_range(struct mmu_gather *tlb,
unsigned long address,
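For reference, the consumer side of the bool return value is not touched by this patch; elsewhere in this header the wrappers look roughly like this (paraphrased from the kernel of this era, shown only to make the contract of __tlb_remove_page_size() explicit):

	static inline void tlb_remove_page_size(struct mmu_gather *tlb,
						struct page *page, int page_size)
	{
		/* A true return means the gather buffers are full: flush now. */
		if (__tlb_remove_page_size(tlb, page, page_size))
			tlb_flush_mmu(tlb);
	}

	static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
	{
		return tlb_remove_page_size(tlb, page, PAGE_SIZE);
	}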
mm/mmu_gather.c (60 additions, 47 deletions)
@@ -13,6 +13,8 @@

#ifdef HAVE_GENERIC_MMU_GATHER

#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER

static bool tlb_next_batch(struct mmu_gather *tlb)
{
struct mmu_gather_batch *batch;
@@ -41,19 +43,72 @@ static bool tlb_next_batch(struct mmu_gather *tlb)
return true;
}

static void tlb_batch_pages_flush(struct mmu_gather *tlb)
{
struct mmu_gather_batch *batch;

for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
free_pages_and_swap_cache(batch->pages, batch->nr);
batch->nr = 0;
}
tlb->active = &tlb->local;
}

static void tlb_batch_list_free(struct mmu_gather *tlb)
{
struct mmu_gather_batch *batch, *next;

for (batch = tlb->local.next; batch; batch = next) {
next = batch->next;
free_pages((unsigned long)batch, 0);
}
tlb->local.next = NULL;
}

bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size)
{
struct mmu_gather_batch *batch;

VM_BUG_ON(!tlb->end);

#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
VM_WARN_ON(tlb->page_size != page_size);
#endif

batch = tlb->active;
/*
* Add the page and check if we are full. If so
* force a flush.
*/
batch->pages[batch->nr++] = page;
if (batch->nr == batch->max) {
if (!tlb_next_batch(tlb))
return true;
batch = tlb->active;
}
VM_BUG_ON_PAGE(batch->nr > batch->max, page);

return false;
}

#endif /* HAVE_MMU_GATHER_NO_GATHER */

void arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
unsigned long start, unsigned long end)
{
tlb->mm = mm;

/* Is it from 0 to ~0? */
tlb->fullmm = !(start | (end+1));

#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
tlb->need_flush_all = 0;
tlb->local.next = NULL;
tlb->local.nr = 0;
tlb->local.max = ARRAY_SIZE(tlb->__pages);
tlb->active = &tlb->local;
tlb->batch_count = 0;
#endif

#ifdef CONFIG_HAVE_RCU_TABLE_FREE
tlb->batch = NULL;
@@ -67,16 +122,12 @@ void arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,

void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
struct mmu_gather_batch *batch;

#ifdef CONFIG_HAVE_RCU_TABLE_FREE
tlb_table_flush(tlb);
#endif
for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
free_pages_and_swap_cache(batch->pages, batch->nr);
batch->nr = 0;
}
tlb->active = &tlb->local;
#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
tlb_batch_pages_flush(tlb);
#endif
}

void tlb_flush_mmu(struct mmu_gather *tlb)
@@ -92,8 +143,6 @@ void tlb_flush_mmu(struct mmu_gather *tlb)
void arch_tlb_finish_mmu(struct mmu_gather *tlb,
unsigned long start, unsigned long end, bool force)
{
struct mmu_gather_batch *batch, *next;

if (force) {
__tlb_reset_range(tlb);
__tlb_adjust_range(tlb, start, end - start);
@@ -103,45 +152,9 @@ void arch_tlb_finish_mmu(struct mmu_gather *tlb,

/* keep the page table cache within bounds */
check_pgt_cache();

for (batch = tlb->local.next; batch; batch = next) {
next = batch->next;
free_pages((unsigned long)batch, 0);
}
tlb->local.next = NULL;
}

/* __tlb_remove_page
* Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)), while
* handling the additional races in SMP caused by other CPUs caching valid
* mappings in their TLBs. Returns the number of free page slots left.
* When out of page slots we must call tlb_flush_mmu().
*returns true if the caller should flush.
*/
bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size)
{
struct mmu_gather_batch *batch;

VM_BUG_ON(!tlb->end);

#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
VM_WARN_ON(tlb->page_size != page_size);
#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
tlb_batch_list_free(tlb);
#endif

batch = tlb->active;
/*
* Add the page and check if we are full. If so
* force a flush.
*/
batch->pages[batch->nr++] = page;
if (batch->nr == batch->max) {
if (!tlb_next_batch(tlb))
return true;
batch = tlb->active;
}
VM_BUG_ON_PAGE(batch->nr > batch->max, page);

return false;
}

#endif /* HAVE_GENERIC_MMU_GATHER */
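
For orientation, tlb_flush_mmu() (whose body the diff view elides) simply chains the TLB flush and the page-freeing step, so tlb_batch_pages_flush() runs via tlb_flush_mmu_free() on every flush, while tlb_batch_list_free() runs only from arch_tlb_finish_mmu() when the gather is torn down. Roughly (paraphrased, not part of the diff):

	void tlb_flush_mmu(struct mmu_gather *tlb)
	{
		tlb_flush_mmu_tlbonly(tlb);	/* flush the hardware TLB first */
		tlb_flush_mmu_free(tlb);	/* then free any gathered pages */
	}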