rmap: drop support of non-linear mappings
We don't create non-linear mappings anymore.  Let's drop code which
handles them in rmap.

Signed-off-by: Kirill A. Shutemov <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
kiryl authored and torvalds committed Feb 10, 2015
1 parent 1da4b35 commit 27ba064
Showing 11 changed files with 18 additions and 300 deletions.
8 changes: 4 additions & 4 deletions Documentation/cachetlb.txt
@@ -317,10 +317,10 @@ maps this page at its virtual address.
         about doing this.

         The idea is, first at flush_dcache_page() time, if
-        page->mapping->i_mmap is an empty tree and ->i_mmap_nonlinear
-        an empty list, just mark the architecture private page flag bit.
-        Later, in update_mmu_cache(), a check is made of this flag bit,
-        and if set the flush is done and the flag bit is cleared.
+        page->mapping->i_mmap is an empty tree, just mark the architecture
+        private page flag bit. Later, in update_mmu_cache(), a check is
+        made of this flag bit, and if set the flush is done and the flag
+        bit is cleared.

         IMPORTANT NOTE: It is often important, if you defer the flush,
                 that the actual flush occurs on the same CPU
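For illustration, the deferred-flush scheme the updated text describes has roughly the following shape. This is a minimal sketch assuming a hypothetical arch-private __flush_dcache_page() primitive and the PG_arch_1 page flag; it is not the implementation of any particular architecture.

/* Sketch of the deferred D-cache flush described above (hypothetical arch code). */
void flush_dcache_page(struct page *page)
{
        struct address_space *mapping = page_mapping(page);

        if (mapping && RB_EMPTY_ROOT(&mapping->i_mmap)) {
                /* No user mappings yet: just remember the page needs a flush. */
                set_bit(PG_arch_1, &page->flags);
        } else {
                __flush_dcache_page(page);      /* assumed arch flush primitive */
        }
}

void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
                      pte_t *ptep)
{
        struct page *page = pte_page(*ptep);

        /* The page becomes visible through a user mapping: flush if deferred. */
        if (test_and_clear_bit(PG_arch_1, &page->flags))
                __flush_dcache_page(page);
}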
1 change: 0 additions & 1 deletion fs/inode.c
@@ -355,7 +355,6 @@ void address_space_init_once(struct address_space *mapping)
         INIT_LIST_HEAD(&mapping->private_list);
         spin_lock_init(&mapping->private_lock);
         mapping->i_mmap = RB_ROOT;
-        INIT_LIST_HEAD(&mapping->i_mmap_nonlinear);
 }
 EXPORT_SYMBOL(address_space_init_once);

4 changes: 1 addition & 3 deletions include/linux/fs.h
@@ -401,7 +401,6 @@ struct address_space {
         spinlock_t tree_lock; /* and lock protecting it */
         atomic_t i_mmap_writable;/* count VM_SHARED mappings */
         struct rb_root i_mmap; /* tree of private and shared mappings */
-        struct list_head i_mmap_nonlinear;/*list VM_NONLINEAR mappings */
         struct rw_semaphore i_mmap_rwsem; /* protect tree, count, list */
         /* Protected by tree_lock together with the radix tree */
         unsigned long nrpages; /* number of total pages */
@@ -493,8 +492,7 @@ static inline void i_mmap_unlock_read(struct address_space *mapping)
  */
 static inline int mapping_mapped(struct address_space *mapping)
 {
-        return !RB_EMPTY_ROOT(&mapping->i_mmap) ||
-                !list_empty(&mapping->i_mmap_nonlinear);
+        return !RB_EMPTY_ROOT(&mapping->i_mmap);
 }

 /*
6 changes: 0 additions & 6 deletions include/linux/mm.h
@@ -1796,12 +1796,6 @@ struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node,
         for (vma = vma_interval_tree_iter_first(root, start, last); \
              vma; vma = vma_interval_tree_iter_next(vma, start, last))

-static inline void vma_nonlinear_insert(struct vm_area_struct *vma,
-                                        struct list_head *list)
-{
-        list_add_tail(&vma->shared.nonlinear, list);
-}
-
 void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
                                    struct rb_root *root);
 void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
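With the non-linear list gone, a file rmap walk has only the i_mmap interval tree left to visit. As a rough, simplified sketch of what mm/rmap.c's rmap_walk_file() does with the iterator shown above (page locking and other corner cases elided; the address arithmetic is the usual linear pgoff-to-address mapping):

static int rmap_walk_file_sketch(struct page *page, struct rmap_walk_control *rwc)
{
        struct address_space *mapping = page->mapping;
        pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
        struct vm_area_struct *vma;
        int ret = SWAP_AGAIN;

        if (!mapping)
                return ret;

        i_mmap_lock_read(mapping);
        /* Visit only the VMAs whose file range covers pgoff. */
        vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
                unsigned long address = vma->vm_start +
                        ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);

                ret = rwc->rmap_one(page, vma, address, rwc->arg);
                if (ret != SWAP_AGAIN)
                        break;
        }
        i_mmap_unlock_read(mapping);
        return ret;
}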
4 changes: 1 addition & 3 deletions include/linux/mm_types.h
@@ -273,15 +273,13 @@ struct vm_area_struct {

         /*
          * For areas with an address space and backing store,
-         * linkage into the address_space->i_mmap interval tree, or
-         * linkage of vma in the address_space->i_mmap_nonlinear list.
+         * linkage into the address_space->i_mmap interval tree.
          */
         union {
                 struct {
                         struct rb_node rb;
                         unsigned long rb_subtree_last;
                 } linear;
-                struct list_head nonlinear;
         } shared;

         /*
2 changes: 0 additions & 2 deletions include/linux/rmap.h
@@ -246,7 +246,6 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
  * arg: passed to rmap_one() and invalid_vma()
  * rmap_one: executed on each vma where page is mapped
  * done: for checking traversing termination condition
- * file_nonlinear: for handling file nonlinear mapping
  * anon_lock: for getting anon_lock by optimized way rather than default
  * invalid_vma: for skipping uninterested vma
  */
@@ -255,7 +254,6 @@ struct rmap_walk_control {
         int (*rmap_one)(struct page *page, struct vm_area_struct *vma,
                                         unsigned long addr, void *arg);
         int (*done)(struct page *page);
-        int (*file_nonlinear)(struct page *, struct address_space *, void *arg);
         struct anon_vma *(*anon_lock)(struct page *page);
         bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
 };
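For reference, a walker fills the control structure in roughly as below. The callback names are hypothetical; the pattern mirrors existing users such as remove_migration_ptes() and try_to_unmap():

static int my_rmap_one(struct page *page, struct vm_area_struct *vma,
                       unsigned long addr, void *arg)
{
        /* Act on one mapping of @page at @addr; SWAP_AGAIN keeps walking. */
        return SWAP_AGAIN;
}

static int my_done(struct page *page)
{
        /* Returning non-zero terminates the walk early. */
        return !page_mapped(page);
}

static bool my_invalid_vma(struct vm_area_struct *vma, void *arg)
{
        /* Return true for VMAs the walk should skip. */
        return vma->vm_flags & VM_LOCKED;
}

static void walk_page_mappings(struct page *page, void *private)
{
        struct rmap_walk_control rwc = {
                .rmap_one = my_rmap_one,
                .done = my_done,
                .invalid_vma = my_invalid_vma,
                .arg = private,
        };

        rmap_walk(page, &rwc);
}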
8 changes: 2 additions & 6 deletions kernel/fork.c
@@ -438,12 +438,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
                                 atomic_inc(&mapping->i_mmap_writable);
                         flush_dcache_mmap_lock(mapping);
                         /* insert tmp into the share list, just after mpnt */
-                        if (unlikely(tmp->vm_flags & VM_NONLINEAR))
-                                vma_nonlinear_insert(tmp,
-                                                &mapping->i_mmap_nonlinear);
-                        else
-                                vma_interval_tree_insert_after(tmp, mpnt,
-                                                        &mapping->i_mmap);
+                        vma_interval_tree_insert_after(tmp, mpnt,
+                                        &mapping->i_mmap);
                         flush_dcache_mmap_unlock(mapping);
                         i_mmap_unlock_write(mapping);
                 }
32 changes: 0 additions & 32 deletions mm/migrate.c
@@ -178,37 +178,6 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
         return SWAP_AGAIN;
 }

-/*
- * Congratulations to trinity for discovering this bug.
- * mm/fremap.c's remap_file_pages() accepts any range within a single vma to
- * convert that vma to VM_NONLINEAR; and generic_file_remap_pages() will then
- * replace the specified range by file ptes throughout (maybe populated after).
- * If page migration finds a page within that range, while it's still located
- * by vma_interval_tree rather than lost to i_mmap_nonlinear list, no problem:
- * zap_pte() clears the temporary migration entry before mmap_sem is dropped.
- * But if the migrating page is in a part of the vma outside the range to be
- * remapped, then it will not be cleared, and remove_migration_ptes() needs to
- * deal with it. Fortunately, this part of the vma is of course still linear,
- * so we just need to use linear location on the nonlinear list.
- */
-static int remove_linear_migration_ptes_from_nonlinear(struct page *page,
-                struct address_space *mapping, void *arg)
-{
-        struct vm_area_struct *vma;
-        /* hugetlbfs does not support remap_pages, so no huge pgoff worries */
-        pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
-        unsigned long addr;
-
-        list_for_each_entry(vma,
-                        &mapping->i_mmap_nonlinear, shared.nonlinear) {
-
-                addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
-                if (addr >= vma->vm_start && addr < vma->vm_end)
-                        remove_migration_pte(page, vma, addr, arg);
-        }
-        return SWAP_AGAIN;
-}
-
 /*
  * Get rid of all migration entries and replace them by
  * references to the indicated page.
@@ -218,7 +187,6 @@ static void remove_migration_ptes(struct page *old, struct page *new)
         struct rmap_walk_control rwc = {
                 .rmap_one = remove_migration_pte,
                 .arg = old,
-                .file_nonlinear = remove_linear_migration_ptes_from_nonlinear,
         };

         rmap_walk(new, &rwc);
24 changes: 7 additions & 17 deletions mm/mmap.c
@@ -243,10 +243,7 @@ static void __remove_shared_vm_struct(struct vm_area_struct *vma,
                 mapping_unmap_writable(mapping);

         flush_dcache_mmap_lock(mapping);
-        if (unlikely(vma->vm_flags & VM_NONLINEAR))
-                list_del_init(&vma->shared.nonlinear);
-        else
-                vma_interval_tree_remove(vma, &mapping->i_mmap);
+        vma_interval_tree_remove(vma, &mapping->i_mmap);
         flush_dcache_mmap_unlock(mapping);
 }

@@ -649,10 +646,7 @@ static void __vma_link_file(struct vm_area_struct *vma)
                         atomic_inc(&mapping->i_mmap_writable);

                 flush_dcache_mmap_lock(mapping);
-                if (unlikely(vma->vm_flags & VM_NONLINEAR))
-                        vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
-                else
-                        vma_interval_tree_insert(vma, &mapping->i_mmap);
+                vma_interval_tree_insert(vma, &mapping->i_mmap);
                 flush_dcache_mmap_unlock(mapping);
         }
 }
@@ -789,14 +783,11 @@ again: remove_next = 1 + (end > next->vm_end);

         if (file) {
                 mapping = file->f_mapping;
-                if (!(vma->vm_flags & VM_NONLINEAR)) {
-                        root = &mapping->i_mmap;
-                        uprobe_munmap(vma, vma->vm_start, vma->vm_end);
+                root = &mapping->i_mmap;
+                uprobe_munmap(vma, vma->vm_start, vma->vm_end);

-                        if (adjust_next)
-                                uprobe_munmap(next, next->vm_start,
-                                                        next->vm_end);
-                }
+                if (adjust_next)
+                        uprobe_munmap(next, next->vm_start, next->vm_end);

                 i_mmap_lock_write(mapping);
                 if (insert) {
@@ -3177,8 +3168,7 @@ static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
  *
  * mmap_sem in write mode is required in order to block all operations
  * that could modify pagetables and free pages without need of
- * altering the vma layout (for example populate_range() with
- * nonlinear vmas). It's also needed in write mode to avoid new
+ * altering the vma layout. It's also needed in write mode to avoid new
  * anon_vmas to be associated with existing vmas.
  *
  * A single task can't take more than one mm_take_all_locks() in a row
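The calling convention described in that comment looks roughly as follows. mm_take_all_locks() and mm_drop_all_locks() are the real entry points; the wrapper around them is illustrative only:

static int with_all_vma_locks_held(struct mm_struct *mm)
{
        int ret;

        down_write(&mm->mmap_sem);      /* blocks changes to the vma layout */
        ret = mm_take_all_locks(mm);    /* returns -EINTR if interrupted */
        if (ret)
                goto out;

        /* ... safe to walk and modify every vma's rmap structures here ... */

        mm_drop_all_locks(mm);
out:
        up_write(&mm->mmap_sem);
        return ret;
}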
