thp: change pmd_trans_huge_lock() interface to return ptl
After THP refcounting rework we have only two possible return values
from pmd_trans_huge_lock(): success and failure.  Return-by-pointer for
ptl doesn't make much sense in this case.

Let's convert pmd_trans_huge_lock() to return ptl on success and NULL on
failure.
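The conversion at each call site follows one pattern; as a schematic before/after (condensed from the hunks below, not any single call site):

	/* Before: boolean success, lock returned through an out-parameter */
	spinlock_t *ptl;

	if (pmd_trans_huge_lock(pmd, vma, &ptl)) {
		/* pmd is huge and stable; ptl is held */
		spin_unlock(ptl);
	}

	/* After: the ptl itself is returned, or NULL on failure */
	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		/* pmd is huge and stable; ptl is held */
		spin_unlock(ptl);
	}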

Signed-off-by: Kirill A. Shutemov <[email protected]>
Suggested-by: Linus Torvalds <[email protected]>
Cc: Minchan Kim <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
kiryl authored and torvalds committed Jan 22, 2016
1 parent 404a474 commit b6ec57f
Showing 5 changed files with 36 additions and 25 deletions.
fs/proc/task_mmu.c: 8 additions & 4 deletions
@@ -602,7 +602,8 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 	pte_t *pte;
 	spinlock_t *ptl;
 
-	if (pmd_trans_huge_lock(pmd, vma, &ptl)) {
+	ptl = pmd_trans_huge_lock(pmd, vma);
+	if (ptl) {
 		smaps_pmd_entry(pmd, addr, walk);
 		spin_unlock(ptl);
 		return 0;
@@ -913,7 +914,8 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
 	spinlock_t *ptl;
 	struct page *page;
 
-	if (pmd_trans_huge_lock(pmd, vma, &ptl)) {
+	ptl = pmd_trans_huge_lock(pmd, vma);
+	if (ptl) {
 		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
 			clear_soft_dirty_pmd(vma, addr, pmd);
 			goto out;
@@ -1187,7 +1189,8 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
 	int err = 0;
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-	if (pmd_trans_huge_lock(pmdp, vma, &ptl)) {
+	ptl = pmd_trans_huge_lock(pmdp, vma);
+	if (ptl) {
 		u64 flags = 0, frame = 0;
 		pmd_t pmd = *pmdp;
 
@@ -1519,7 +1522,8 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
 	pte_t *orig_pte;
 	pte_t *pte;
 
-	if (pmd_trans_huge_lock(pmd, vma, &ptl)) {
+	ptl = pmd_trans_huge_lock(pmd, vma);
+	if (ptl) {
 		pte_t huge_pte = *(pte_t *)pmd;
 		struct page *page;
 
include/linux/huge_mm.h: 8 additions & 8 deletions
@@ -120,15 +120,15 @@ extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
 				    unsigned long start,
 				    unsigned long end,
 				    long adjust_next);
-extern bool __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
-		spinlock_t **ptl);
+extern spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd,
+		struct vm_area_struct *vma);
 /* mmap_sem must be held on entry */
-static inline bool pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
-		spinlock_t **ptl)
+static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
+		struct vm_area_struct *vma)
 {
 	VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
 	if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
-		return __pmd_trans_huge_lock(pmd, vma, ptl);
+		return __pmd_trans_huge_lock(pmd, vma);
 	else
 		return false;
 }
@@ -190,10 +190,10 @@ static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
 					 long adjust_next)
 {
 }
-static inline bool pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
-		spinlock_t **ptl)
+static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
+		struct vm_area_struct *vma)
 {
-	return false;
+	return NULL;
 }
 
 static inline int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
mm/huge_memory.c: 14 additions & 10 deletions
@@ -1560,7 +1560,8 @@ int madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 	struct mm_struct *mm = tlb->mm;
 	int ret = 0;
 
-	if (!pmd_trans_huge_lock(pmd, vma, &ptl))
+	ptl = pmd_trans_huge_lock(pmd, vma);
+	if (!ptl)
 		goto out_unlocked;
 
 	orig_pmd = *pmd;
@@ -1627,7 +1628,8 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 	pmd_t orig_pmd;
 	spinlock_t *ptl;
 
-	if (!__pmd_trans_huge_lock(pmd, vma, &ptl))
+	ptl = __pmd_trans_huge_lock(pmd, vma);
+	if (!ptl)
 		return 0;
 	/*
 	 * For architectures like ppc64 we look at deposited pgtable
@@ -1690,7 +1692,8 @@ bool move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
 	 * We don't have to worry about the ordering of src and dst
 	 * ptlocks because exclusive mmap_sem prevents deadlock.
 	 */
-	if (__pmd_trans_huge_lock(old_pmd, vma, &old_ptl)) {
+	old_ptl = __pmd_trans_huge_lock(old_pmd, vma);
+	if (old_ptl) {
 		new_ptl = pmd_lockptr(mm, new_pmd);
 		if (new_ptl != old_ptl)
 			spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
@@ -1724,7 +1727,8 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 	spinlock_t *ptl;
 	int ret = 0;
 
-	if (__pmd_trans_huge_lock(pmd, vma, &ptl)) {
+	ptl = __pmd_trans_huge_lock(pmd, vma);
+	if (ptl) {
 		pmd_t entry;
 		bool preserve_write = prot_numa && pmd_write(*pmd);
 		ret = 1;
@@ -1760,14 +1764,14 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
  * Note that if it returns true, this routine returns without unlocking page
  * table lock. So callers must unlock it.
  */
-bool __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
-		spinlock_t **ptl)
+spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
 {
-	*ptl = pmd_lock(vma->vm_mm, pmd);
+	spinlock_t *ptl;
+	ptl = pmd_lock(vma->vm_mm, pmd);
 	if (likely(pmd_trans_huge(*pmd) || pmd_devmap(*pmd)))
-		return true;
-	spin_unlock(*ptl);
-	return false;
+		return ptl;
+	spin_unlock(ptl);
+	return NULL;
 }
 
 #define VM_NO_THP (VM_SPECIAL | VM_HUGETLB | VM_SHARED | VM_MAYSHARE)
mm/memcontrol.c: 4 additions & 2 deletions
@@ -4638,7 +4638,8 @@ static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
 	pte_t *pte;
 	spinlock_t *ptl;
 
-	if (pmd_trans_huge_lock(pmd, vma, &ptl)) {
+	ptl = pmd_trans_huge_lock(pmd, vma);
+	if (ptl) {
 		if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
 			mc.precharge += HPAGE_PMD_NR;
 		spin_unlock(ptl);
@@ -4826,7 +4827,8 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
 	union mc_target target;
 	struct page *page;
 
-	if (pmd_trans_huge_lock(pmd, vma, &ptl)) {
+	ptl = pmd_trans_huge_lock(pmd, vma);
+	if (ptl) {
 		if (mc.precharge < HPAGE_PMD_NR) {
 			spin_unlock(ptl);
 			return 0;
mm/mincore.c: 2 additions & 1 deletion
@@ -117,7 +117,8 @@ static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 	unsigned char *vec = walk->private;
 	int nr = (end - addr) >> PAGE_SHIFT;
 
-	if (pmd_trans_huge_lock(pmd, vma, &ptl)) {
+	ptl = pmd_trans_huge_lock(pmd, vma);
+	if (ptl) {
 		memset(vec, 1, nr);
 		spin_unlock(ptl);
 		goto out;
