mm: nominate faultaround area in bytes rather than page order
There is evidence that the faultaround feature is less relevant on
architectures with a page size bigger than 4k, which makes sense since
the page fault overhead per byte of mapped area should be lower there.

Let's rework the feature to specify the faultaround area in bytes
instead of page order.  It's 64 kilobytes for now.

The patch effectively disables faultaround on architectures with page size
>= 64k (like ppc64).

It's possible that some other faultaround area size is relevant for a
platform.  We can expose the `fault_around_bytes' variable to
arch-specific code once such platforms are found.
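
To make the arithmetic concrete, here is a standalone sketch of the new
helpers (an editor's illustration, not part of the patch:
rounddown_pow_of_two() is re-implemented as a plain loop rather than the
kernel's log2-based helper, and the page size is passed as a parameter
instead of coming from the PAGE_SIZE macro):

/*
 * Editor's illustration -- not kernel code.
 */
#include <stdio.h>

static unsigned long fault_around_bytes = 65536;

static unsigned long rounddown_pow_of_two(unsigned long n)
{
	unsigned long p = 1;

	while (p * 2 <= n)
		p *= 2;
	return p;
}

static unsigned long fault_around_pages(unsigned long page_size)
{
	return rounddown_pow_of_two(fault_around_bytes) / page_size;
}

int main(void)
{
	/* 4k pages (e.g. x86): 65536 / 4096 = 16 pages faulted around */
	printf("4k pages:  %lu\n", fault_around_pages(4096));

	/*
	 * 64k pages (e.g. ppc64 with 64k pages): 65536 / 65536 = 1,
	 * so the fault_around_pages() > 1 check added to
	 * do_read_fault() below skips faultaround entirely.
	 */
	printf("64k pages: %lu\n", fault_around_pages(65536));
	return 0;
}

Compiled and run, this prints 16 and 1, matching the claim above that a
64k-page architecture ends up with a single-page window, i.e. faultaround
effectively disabled.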

Signed-off-by: Kirill A. Shutemov <[email protected]>
Cc: Rusty Russell <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Madhavan Srinivasan <[email protected]>
Cc: Benjamin Herrenschmidt <[email protected]>
Cc: Paul Mackerras <[email protected]>
Cc: Rik van Riel <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Andi Kleen <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Dave Hansen <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
kiryl authored and torvalds committed Jun 4, 2014
1 parent 7d01817 commit a9b0f86
Showing 1 changed file with 23 additions and 39 deletions.

mm/memory.c
@@ -2758,63 +2758,47 @@ void do_set_pte(struct vm_area_struct *vma, unsigned long address,
 	update_mmu_cache(vma, address, pte);
 }
 
-#define FAULT_AROUND_ORDER 4
+static unsigned long fault_around_bytes = 65536;
+
+static inline unsigned long fault_around_pages(void)
+{
+	return rounddown_pow_of_two(fault_around_bytes) / PAGE_SIZE;
+}
+
+static inline unsigned long fault_around_mask(void)
+{
+	return ~(rounddown_pow_of_two(fault_around_bytes) - 1) & PAGE_MASK;
+}
 
-#ifdef CONFIG_DEBUG_FS
-static unsigned int fault_around_order = FAULT_AROUND_ORDER;
-
-static int fault_around_order_get(void *data, u64 *val)
+#ifdef CONFIG_DEBUG_FS
+static int fault_around_bytes_get(void *data, u64 *val)
 {
-	*val = fault_around_order;
+	*val = fault_around_bytes;
 	return 0;
 }
 
-static int fault_around_order_set(void *data, u64 val)
+static int fault_around_bytes_set(void *data, u64 val)
 {
-	BUILD_BUG_ON((1UL << FAULT_AROUND_ORDER) > PTRS_PER_PTE);
-	if (1UL << val > PTRS_PER_PTE)
+	if (val / PAGE_SIZE > PTRS_PER_PTE)
 		return -EINVAL;
-	fault_around_order = val;
+	fault_around_bytes = val;
 	return 0;
 }
-DEFINE_SIMPLE_ATTRIBUTE(fault_around_order_fops,
-		fault_around_order_get, fault_around_order_set, "%llu\n");
+DEFINE_SIMPLE_ATTRIBUTE(fault_around_bytes_fops,
+		fault_around_bytes_get, fault_around_bytes_set, "%llu\n");
 
 static int __init fault_around_debugfs(void)
 {
 	void *ret;
 
-	ret = debugfs_create_file("fault_around_order", 0644, NULL, NULL,
-			&fault_around_order_fops);
+	ret = debugfs_create_file("fault_around_bytes", 0644, NULL, NULL,
+			&fault_around_bytes_fops);
 	if (!ret)
-		pr_warn("Failed to create fault_around_order in debugfs");
+		pr_warn("Failed to create fault_around_bytes in debugfs");
 	return 0;
 }
 late_initcall(fault_around_debugfs);
-
-static inline unsigned long fault_around_pages(void)
-{
-	return 1UL << fault_around_order;
-}
-
-static inline unsigned long fault_around_mask(void)
-{
-	return ~((1UL << (PAGE_SHIFT + fault_around_order)) - 1);
-}
-#else
-static inline unsigned long fault_around_pages(void)
-{
-	unsigned long nr_pages;
-
-	nr_pages = 1UL << FAULT_AROUND_ORDER;
-	BUILD_BUG_ON(nr_pages > PTRS_PER_PTE);
-	return nr_pages;
-}
-
-static inline unsigned long fault_around_mask(void)
-{
-	return ~((1UL << (PAGE_SHIFT + FAULT_AROUND_ORDER)) - 1);
-}
 #endif
 
 static void do_fault_around(struct vm_area_struct *vma, unsigned long address,
@@ -2871,7 +2855,7 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * if page by the offset is not ready to be mapped (cold cache or
 	 * something).
 	 */
-	if (vma->vm_ops->map_pages) {
+	if (vma->vm_ops->map_pages && fault_around_pages() > 1) {
 		pte = pte_offset_map_lock(mm, pmd, address, &ptl);
 		do_fault_around(vma, address, pte, pgoff, flags);
 		if (!pte_same(*pte, orig_pte))
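
Similarly, a standalone sketch of the fault_around_mask() arithmetic
introduced above (again an editor's illustration, not kernel code:
PAGE_SIZE and PAGE_MASK are hard-coded for a 4k-page system, and the
sample address is arbitrary). The mask rounds a faulting address down to
the start of its power-of-two faultaround window:

/*
 * Editor's illustration -- not kernel code.
 */
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

static unsigned long fault_around_bytes = 65536;

static unsigned long rounddown_pow_of_two(unsigned long n)
{
	unsigned long p = 1;

	while (p * 2 <= n)
		p *= 2;
	return p;
}

static unsigned long fault_around_mask(void)
{
	return ~(rounddown_pow_of_two(fault_around_bytes) - 1) & PAGE_MASK;
}

int main(void)
{
	unsigned long address = 0x7f0000012345UL;

	/* 0x7f0000012345 rounded down to its 64k window: 0x7f0000010000 */
	printf("window start: %#lx\n", address & fault_around_mask());
	return 0;
}

With the default 64 kilobytes, the low 16 address bits are cleared, so
every fault inside the same 64k window is aligned to the same starting
address for the surrounding mapping.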
